Dataset columns: text (string, length 7 - 1.24M), id (string, length 14 - 166), metadata (dict), __index_level_0__ (int64, 0 - 519)
# coding=utf-8
# Copyright 2022 The HuggingFace Team Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_accelerate_available,
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_accelerate_available():
    from accelerate import PartialState
    from accelerate.logging import get_logger

    logger = get_logger(__name__)
    _ = PartialState()

if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only"""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class BaseMixedInt8Test(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside the setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = (
        1.540025  # This was obtained on a Quadro RTX 8000 so the number might slightly change
    )

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of the family.\n")
    # Expected values on a A10
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    MAX_NEW_TOKENS = 10
    # Expected values with offload
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer based in")

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class MixedInt8Test(BaseMixedInt8Test):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto")

    def tearDown(self):
        r"""
        TearDown function needs to be called at the end of
        each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see:
        https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27
        """
        del self.model_fp16
        del self.model_8bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_get_keys_to_not_convert_trust_remote_code(self):
        r"""
        Test the `get_keys_to_not_convert` function with `trust_remote_code` models.
        """
        from accelerate import init_empty_weights

        from transformers.integrations.bitsandbytes import get_keys_to_not_convert

        model_id = "mosaicml/mpt-7b"
        config = AutoConfig.from_pretrained(
            model_id, trust_remote_code=True, revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7"
        )
        with init_empty_weights():
            model = AutoModelForCausalLM.from_config(
                config, trust_remote_code=True, code_revision="ada218f9a93b5f1c6dce48a4cc9ff01fcba431e7"
            )
        self.assertEqual(get_keys_to_not_convert(model), ["transformer.wte"])

    def test_get_keys_to_not_convert(self):
        r"""
        Test the `get_keys_to_not_convert` function.
        """
        from accelerate import init_empty_weights

        from transformers import AutoModelForMaskedLM, Blip2ForConditionalGeneration, MptForCausalLM, OPTForCausalLM
        from transformers.integrations.bitsandbytes import get_keys_to_not_convert

        model_id = "mosaicml/mpt-7b"
        config = AutoConfig.from_pretrained(model_id, revision="72e5f594ce36f9cabfa2a9fd8f58b491eb467ee7")
        with init_empty_weights():
            model = MptForCausalLM(config)
        # The order of the keys does not matter, so we compare sorted copies with `sorted(...)`
        # (`list.sort()` returns `None`, so it must not be used here). Same for the other tests.
        self.assertEqual(sorted(get_keys_to_not_convert(model)), sorted(["lm_head", "transformer.wte"]))

        model_id = "Salesforce/blip2-opt-2.7b"
        config = AutoConfig.from_pretrained(model_id, revision="1ef7f63a8f0a144c13fdca8103eb7b4691c74cec")
        with init_empty_weights():
            model = Blip2ForConditionalGeneration(config)
        self.assertEqual(
            sorted(get_keys_to_not_convert(model)),
            sorted(["language_model.lm_head", "language_model.model.decoder.embed_tokens"]),
        )

        model_id = "facebook/opt-350m"
        config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5")
        with init_empty_weights():
            model = OPTForCausalLM(config)
        self.assertEqual(sorted(get_keys_to_not_convert(model)), sorted(["lm_head", "model.decoder.embed_tokens"]))

        model_id = "FacebookAI/roberta-large"
        config = AutoConfig.from_pretrained(model_id, revision="716877d372b884cad6d419d828bac6c85b3b18d9")
        with init_empty_weights():
            model = AutoModelForMaskedLM.from_config(config)
        self.assertEqual(
            sorted(get_keys_to_not_convert(model)),
            sorted(["roberta.embeddings.word_embeddings", "lm_head", "lm_head.decoder"]),
        )

    def test_quantization_config_json_serialization(self):
        r"""
        A simple test to check if the quantization config is correctly serialized and deserialized
        """
        config = self.model_8bit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()

    def test_original_dtype(self):
        r"""
        A simple test to check if the model successfully stores the original dtype
        """
        self.assertTrue(hasattr(self.model_8bit.config, "_pre_quantization_dtype"))
        self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype"))
        self.assertTrue(self.model_8bit.config._pre_quantization_dtype == torch.float16)

    def test_memory_footprint(self):
        r"""
        A simple test to check if the model conversion has been done correctly by checking on the
        memory footprint of the converted model and the class type of the linear layers of the converted model
        """
        from bitsandbytes.nn import Int8Params

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_8bit = self.model_8bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_8bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        self.assertTrue(get_some_linear_layer(self.model_8bit).weight.__class__ == Int8Params)

    def test_linear_are_8bit(self):
        r"""
        A simple test to check that, after conversion, all `nn.Linear` weights are stored in `int8`,
        except the `lm_head` and the modules kept in fp32.
        """
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_8bit.get_memory_footprint()

        for name, module in self.model_8bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    self.assertTrue(module.weight.dtype == torch.int8)

    def test_llm_skip(self):
        r"""
        A simple test to check if `llm_int8_skip_modules` works as expected
        """
        import bitsandbytes as bnb

        quantization_config = BitsAndBytesConfig(load_in_8bit=True, llm_int8_skip_modules=["classifier"])
        seq_classification_model = AutoModelForSequenceClassification.from_pretrained(
            "FacebookAI/roberta-large-mnli", quantization_config=quantization_config
        )
        self.assertTrue(seq_classification_model.roberta.encoder.layer[0].output.dense.weight.dtype == torch.int8)
        self.assertTrue(
            isinstance(seq_classification_model.roberta.encoder.layer[0].output.dense, bnb.nn.Linear8bitLt)
        )

        self.assertTrue(isinstance(seq_classification_model.classifier.dense, nn.Linear))
        self.assertTrue(seq_classification_model.classifier.dense.weight.dtype != torch.int8)
        self.assertTrue(isinstance(seq_classification_model.classifier.out_proj, nn.Linear))
        self.assertTrue(seq_classification_model.classifier.out_proj.weight.dtype != torch.int8)

    def test_generate_quality(self):
        r"""
        Test the generation quality of the quantized model and see that we are matching the expected output.
        Given that we are operating on small numbers + the testing model is relatively small, we might not get
        the same output across GPUs. So we'll generate a few tokens (5-10) and check their output.
""" encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = self.model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_config(self): r""" Test that loading the model with the config is equivalent """ bnb_config = BitsAndBytesConfig() bnb_config.load_in_8bit = True model_8bit_from_config = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit_from_config.generate( input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10 ) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality_dequantize(self): r""" Test that loading the model and dequantizing it produce correct results """ bnb_config = BitsAndBytesConfig(load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, device_map="auto" ) model_8bit.dequantize() encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_8bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_raise_if_config_and_load_in_8bit(self): r""" Test that loading the model with the config and `load_in_8bit` raises an error """ bnb_config = BitsAndBytesConfig() with self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=bnb_config, load_in_8bit=True, device_map="auto", llm_int8_enable_fp32_cpu_offload=True, ) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after converting it in 8-bit will throw an error. Checks also if other models are casted correctly. """ with self.assertRaises(ValueError): # Tries with `str` self.model_8bit.to("cpu") with self.assertRaises(ValueError): # Tries with a `dtype`` self.model_8bit.to(torch.float16) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.to(torch.device("cuda:0")) with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.float() with self.assertRaises(ValueError): # Tries with a `device` self.model_8bit.half() # Test if we did not break anything encoded_input = self.tokenizer(self.input_text, return_tensors="pt") self.model_fp16 = self.model_fp16.to(torch.float32) _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Check this does not throw an error _ = self.model_fp16.to("cpu") # Check this does not throw an error _ = self.model_fp16.half() # Check this does not throw an error _ = self.model_fp16.float() def test_fp32_int8_conversion(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. """ model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-small", load_in_8bit=True, device_map="auto") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) def test_int8_serialization(self): r""" Test whether it is possible to serialize a model in 8-bit. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_regression(self): r""" Test whether it is possible to serialize a model in 8-bit - using not safetensors """ from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, safe_serialization=False) # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, load_in_8bit=True, device_map="auto") linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_serialization_sharded(self): r""" Test whether it is possible to serialize a model in 8-bit - sharded version. 
""" from bitsandbytes.nn import Int8Params with tempfile.TemporaryDirectory() as tmpdirname: self.model_8bit.save_pretrained(tmpdirname, max_shard_size="200MB") # check that the file `quantization_config` is present config = AutoConfig.from_pretrained(tmpdirname) self.assertTrue(hasattr(config, "quantization_config")) model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname) linear = get_some_linear_layer(model_from_saved) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model_from_saved.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_int8_from_pretrained(self): r""" Test whether loading a 8bit model from the Hub works as expected """ from bitsandbytes.nn import Int8Params model_id = "ybelkada/bloom-1b7-8bit" model = AutoModelForCausalLM.from_pretrained(model_id) linear = get_some_linear_layer(model) self.assertTrue(linear.weight.__class__ == Int8Params) self.assertTrue(hasattr(linear.weight, "SCB")) # generate encoded_input = self.tokenizer(self.input_text, return_tensors="pt") output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class MixedInt8T5Test(unittest.TestCase): @classmethod def setUpClass(cls): cls.model_name = "google-t5/t5-small" cls.dense_act_model_name = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.input_text = "Translate in German: Hello, my dog is cute" def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ gc.collect() torch.cuda.empty_cache() def test_inference_without_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ from transformers import T5ForConditionalGeneration modules = T5ForConditionalGeneration._keep_in_fp32_modules T5ForConditionalGeneration._keep_in_fp32_modules = None # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) T5ForConditionalGeneration._keep_in_fp32_modules = modules def test_inference_with_keep_in_fp32(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. 
""" import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) def test_inference_with_keep_in_fp32_serialized(self): r""" Test whether it is possible to mix both `int8` and `fp32` weights when using `keep_in_fp32_modules` correctly on a serialized model. `flan-t5-small` uses `T5DenseGatedActDense` whereas `google-t5/t5-small` uses `T5DenseReluDense`. We need to test both cases. """ import bitsandbytes as bnb from transformers import T5ForConditionalGeneration # test with `google-t5/t5-small` model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir) model = T5ForConditionalGeneration.from_pretrained(tmp_dir) # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear8bitLt)) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) # test with `flan-t5-small` model = T5ForConditionalGeneration.from_pretrained( self.dense_act_model_name, load_in_8bit=True, device_map="auto" ) encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0) _ = model.generate(**encoded_input) class MixedInt8ModelClassesTest(BaseMixedInt8Test): def setUp(self): super().setUp() # model_name self.model_name = "bigscience/bloom-560m" self.seq_to_seq_name = "google-t5/t5-small" # Different types of model self.base_model = AutoModel.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Sequence classification model self.sequence_model = AutoModelForSequenceClassification.from_pretrained( self.model_name, load_in_8bit=True, device_map="auto" ) # CausalLM model self.model_8bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True, device_map="auto") # Seq2seq model self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained( self.seq_to_seq_name, load_in_8bit=True, device_map="auto" ) def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.base_model del self.sequence_model del self.model_8bit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def test_correct_head_class(self): r""" A simple test to check if the last modules for some classes (AutoModelForCausalLM or SequenceClassification) are kept in their native class. 
""" from bitsandbytes.nn import Int8Params # last param of a base model should be a linear8bit module self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Int8Params) # Other heads should be nn.Parameter self.assertTrue(self.model_8bit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class MixedInt8TestPipeline(BaseMixedInt8Test): def setUp(self): super().setUp() def tearDown(self): r""" TearDown function needs to be called at the end of each test to free the GPU memory and cache, also to avoid unexpected behaviors. Please see: https://discuss.pytorch.org/t/how-can-we-release-gpu-memory-cache/14530/27 """ del self.pipe gc.collect() torch.cuda.empty_cache() def test_pipeline(self): r""" The aim of this test is to verify that the mixed int8 is compatible with `pipeline` from transformers. Since we used pipline for inference speed benchmarking we want to make sure that this feature does not break anything on pipline. """ # self._clear_cuda_cache() self.pipe = pipeline( "text-generation", model=self.model_name, model_kwargs={"device_map": "auto", "load_in_8bit": True}, max_new_tokens=self.MAX_NEW_TOKENS, ) # Real second forward pass pipeline_output = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class MixedInt8TestMultiGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def test_multi_gpu_loading(self): r""" This tests that the model has been loaded and can be used correctly on a multi-GPU setup. Let's just try to load a model on 2 GPUs and see if it works. The model we test has ~2GB of total, 3GB should suffice """ model_parallel = AutoModelForCausalLM.from_pretrained( self.model_name, load_in_8bit=True, device_map="balanced" ) # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1}) # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Second real batch output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class MixedInt8TestCpuGpu(BaseMixedInt8Test): def setUp(self): super().setUp() def check_inference_correctness(self, model): # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_parallel = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation output_text = self.tokenizer.decode(output_parallel[0], skip_special_tokens=True) self.assertIn(output_text, self.EXPECTED_OUTPUTS) def test_cpu_gpu_loading_random_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a random `device_map`. 
""" device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": "cpu", "transformer.h.1": "cpu", "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 1, "transformer.h.10": 0, "transformer.h.11": 1, "transformer.h.12": 0, "transformer.h.13": 0, "transformer.h.14": 1, "transformer.h.15": 0, "transformer.h.16": 0, "transformer.h.17": 1, "transformer.h.18": 1, "transformer.h.19": 0, "transformer.h.20": 1, "transformer.h.21": 1, "transformer.h.22": 0, "transformer.h.23": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time the device map is more organized than the test above and uses the abstraction `transformer.h` to encapsulate all the decoder layers. """ device_map = { "transformer.word_embeddings": "cpu", "transformer.word_embeddings_layernorm": "cpu", "lm_head": "cpu", "transformer.h": 0, "transformer.ln_f": 1, } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. This time we also add `disk` on the device_map. """ device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": "cpu", "lm_head": 0, "transformer.h": 1, "transformer.ln_f": "disk", } bnb_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True, load_in_8bit=True) with tempfile.TemporaryDirectory() as tmpdirname: # Load model model_8bit = AutoModelForCausalLM.from_pretrained( self.model_name, device_map=device_map, quantization_config=bnb_config, offload_folder=tmpdirname, ) # Check that the model has been correctly set on device 0, 1, and `cpu`. self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"}) self.check_inference_correctness(model_8bit) def test_cpu_gpu_disk_loading_custom_device_map_kwargs(self): r""" A test to check is dispatching a model on cpu & gpu works correctly using a custom `device_map`. 
        This time we also add `disk` to the device_map - using the kwargs directly instead of the quantization config.
        """
        device_map = {
            "transformer.word_embeddings": 0,
            "transformer.word_embeddings_layernorm": "cpu",
            "lm_head": 0,
            "transformer.h": 1,
            "transformer.ln_f": "disk",
        }
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Load model
            model_8bit = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                device_map=device_map,
                load_in_8bit=True,
                llm_int8_enable_fp32_cpu_offload=True,
                offload_folder=tmpdirname,
            )

            # Check that the model has been correctly set on device 0, 1, `cpu`, and `disk`.
            self.assertEqual(set(model_8bit.hf_device_map.values()), {0, 1, "cpu", "disk"})

            self.check_inference_correctness(model_8bit)


class MixedInt8TestTraining(BaseMixedInt8Test):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            self.skipTest(reason="This test requires bitsandbytes>=0.37.0")

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_8bit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class MixedInt8GPT2Test(MixedInt8Test):
    model_name = "openai-community/gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 1.8720077507258357
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a big fan of")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I'm a fan of the")
    # Expected values on a A10
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, and I am a member of the")

    def test_int8_from_pretrained(self):
        r"""
        Test whether loading an 8-bit model from the Hub works as expected
        """
        from bitsandbytes.nn import Int8Params

        model_id = "ybelkada/gpt2-xl-8bit"

        model = AutoModelForCausalLM.from_pretrained(model_id)

        linear = get_some_linear_layer(model)
        self.assertTrue(linear.weight.__class__ == Int8Params)
        self.assertTrue(hasattr(linear.weight, "SCB"))

        # generate
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
id: transformers/tests/quantization/bnb/test_mixed_int8.py/0
metadata: { "file_path": "transformers/tests/quantization/bnb/test_mixed_int8.py", "repo_id": "transformers", "token_count": 16033 }
__index_level_0__: 407
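The record above exercises the bitsandbytes mixed-int8 loading path end to end. For orientation, here is a minimal sketch of the loading pattern those tests cover; it assumes a CUDA GPU plus the `bitsandbytes` and `accelerate` packages, and the model id is only an example.

# --- Editor's sketch (not part of the dataset record) ---
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "bigscience/bloom-1b7"
quantization_config = BitsAndBytesConfig(load_in_8bit=True)

tokenizer = AutoTokenizer.from_pretrained(model_name)
# `device_map="auto"` lets accelerate place the quantized weights across devices
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=quantization_config, device_map="auto"
)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# --- end sketch ---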
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import unittest

git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
create_dummy_object("FakeClass", "'torch'") self.assertEqual(dummy_class, expected_dummy_class) def test_create_dummy_files(self): expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) """ dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]}) self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
id: transformers/tests/repo_utils/test_check_dummies.py/0
metadata: { "file_path": "transformers/tests/repo_utils/test_check_dummies.py", "repo_id": "transformers", "token_count": 1800 }
__index_level_0__: 408
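For context on the record above: `create_dummy_files` generates placeholder objects that raise a helpful error when an optional backend is missing. Below is a simplified, illustrative stand-in for the `DummyObject`/`requires_backends` pattern the generated files rely on; the real implementations live in `transformers.utils` and differ in detail.

# --- Editor's sketch (not part of the dataset record) ---
def requires_backends(obj, backends):
    # Simplified stand-in: the real helper also checks which backends are installed.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    raise ImportError(f"{name} requires the following backend(s): {backends}")


class DummyObject(type):
    """Metaclass so that any non-underscore attribute access on the dummy class fails loudly."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


# What a generated dummy looks like (compare DUMMY_CLASS in the record above):
class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
# --- end sketch ---

Instantiating `FakeClass()` or touching `FakeClass.anything` raises an ImportError naming the missing backend instead of an opaque AttributeError.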
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import json
import os
import tempfile

from transformers import is_torch_available

from .utils.test_configuration_utils import config_common_kwargs


class ConfigTester:
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setters
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

        with self.parent.assertRaises(OSError):
            self.config_class.from_pretrained(f".{tmpdirname}")

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)

        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            with self.parent.assertRaises(ValueError):
                config = self.config_class()
        else:
            config = self.config_class()
            self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
id: transformers/tests/test_configuration_common.py/0
metadata: { "file_path": "transformers/tests/test_configuration_common.py", "repo_id": "transformers", "token_count": 2863 }
__index_level_0__: 409
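The `ConfigTester` in the record above is the shared harness that per-model test suites plug into. A minimal sketch of the typical usage follows; `BertConfig` is just an illustrative choice, and the relative import depth depends on where the model test file lives in the test tree.

# --- Editor's sketch (not part of the dataset record) ---
import unittest

from transformers import BertConfig

from .test_configuration_common import ConfigTester  # import depth varies by test location


class BertConfigTest(unittest.TestCase):
    def setUp(self):
        # `parent` gives ConfigTester access to this case's assert* helpers;
        # extra kwargs become the inputs_dict used to instantiate configs.
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        # Runs the JSON round-trip, save/load, subfolder, and num_labels checks in one go.
        self.config_tester.run_common_tests()
# --- end sketch ---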
# coding=utf-8
# Copyright 2018 the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
import gc
import json
import math
import os
import random
import re
import subprocess
import sys
import tempfile
import unittest
from functools import partial
from itertools import product
from pathlib import Path
from typing import Dict, List
from unittest.mock import Mock, patch

import numpy as np
from huggingface_hub import HfFolder, ModelCard, delete_repo, list_repo_commits, list_repo_files
from parameterized import parameterized
from requests.exceptions import HTTPError

from transformers import (
    AutoTokenizer,
    IntervalStrategy,
    PretrainedConfig,
    TrainerCallback,
    TrainingArguments,
    get_polynomial_decay_schedule_with_warmup,
    is_torch_available,
    logging,
)
from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS
from transformers.testing_utils import (
    ENDPOINT_STAGING,
    TOKEN,
    USER,
    CaptureLogger,
    LoggingLevel,
    TestCasePlus,
    backend_device_count,
    execute_subprocess_async,
    get_gpu_count,
    get_tests_dir,
    is_staging_test,
    require_accelerate,
    require_bitsandbytes,
    require_deepspeed,
    require_galore_torch,
    require_grokadamw,
    require_intel_extension_for_pytorch,
    require_liger_kernel,
    require_lomo,
    require_optuna,
    require_peft,
    require_ray,
    require_safetensors,
    require_sentencepiece,
    require_sigopt,
    require_tensorboard,
    require_tokenizers,
    require_torch,
    require_torch_accelerator,
    require_torch_bf16,
    require_torch_gpu,
    require_torch_multi_accelerator,
    require_torch_non_multi_accelerator,
    require_torch_non_multi_gpu,
    require_torch_tensorrt_fx,
    require_torch_tf32,
    require_torch_up_to_2_accelerators,
    require_torchdynamo,
    require_wandb,
    slow,
    torch_device,
)
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend, check_target_module_exists
from transformers.training_args import OptimizerNames
from transformers.utils import (
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    is_accelerate_available,
    is_apex_available,
    is_bitsandbytes_available,
    is_safetensors_available,
    is_torchao_available,
    is_torchdistx_available,
)
from transformers.utils.hp_naming import TrialShortNamer


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import IterableDataset

    import transformers.optimization
    from transformers import (
        AutoModelForCausalLM,
        AutoModelForSequenceClassification,
        EarlyStoppingCallback,
        GlueDataset,
        GlueDataTrainingArguments,
        GPT2Config,
        GPT2LMHeadModel,
        LineByLineTextDataset,
        LlamaConfig,
        LlamaForCausalLM,
        PreTrainedModel,
        Trainer,
        TrainerState,
    )
    from transformers.trainer_pt_utils import AcceleratorConfig

if is_safetensors_available():
    import safetensors.torch


# for version specific tests in TrainerIntegrationTest
require_accelerate_version_min_0_28 = partial(require_accelerate, min_version="0.28")
require_accelerate_version_min_0_30 = partial(require_accelerate, min_version="0.30")
GRAD_ACCUM_KWARGS_VERSION_AVAILABLE = is_accelerate_available("0.28")
if is_accelerate_available():
    from accelerate import Accelerator
    from accelerate.state import AcceleratorState


PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt"


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
        np.random.seed(seed)
        self.label_names = ["labels"] if label_names is None else label_names
        self.length = length
        self.x = np.random.normal(size=(length,)).astype(np.float32)
        self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names]
        self.ys = [y.astype(np.float32) for y in self.ys]

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        result = {name: y[i] for name, y in zip(self.label_names, self.ys)}
        result["input_x"] = self.x[i]
        return result


# Converting Bytes to Megabytes
def bytes2megabytes(x):
    return int(x / 2**20)


# Copied from accelerate: https://github.com/huggingface/accelerate/blob/ee163b66fb7848892519e804688cb4ae981aacbe/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py#L40C1-L73C68
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
            self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            self.end = torch.cuda.memory_allocated()
            self.peak = torch.cuda.max_memory_allocated()
        self.used = bytes2megabytes(self.end - self.begin)
        self.peaked = bytes2megabytes(self.peak - self.begin)


@dataclasses.dataclass
class RegressionTrainingArguments(TrainingArguments):
    a: float = 0.0
    b: float = 0.0
    keep_report_to: bool = False

    def __post_init__(self):
        super().__post_init__()
        # save resources not dealing with reporting unless specified (also avoids the warning when it's not set)
        # can be explicitly disabled via `keep_report_to`
        if not self.keep_report_to:
            self.report_to = []


class RepeatDataset:
    def __init__(self, x, length=64):
        self.x = x
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"input_ids": self.x, "labels": self.x}


class DynamicShapesDataset:
    def __init__(self, length=64, seed=42, batch_size=8):
        self.length = length
        np.random.seed(seed)
        sizes = np.random.randint(1, 20, (length // batch_size,))
        # For easy batching, we make every batch_size consecutive samples the same size.
        self.xs = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)]
        self.ys = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)]

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"input_x": self.xs[i], "labels": self.ys[i]}


class AlmostAccuracy:
    def __init__(self, thresh=0.25):
        self.thresh = thresh

    def __call__(self, eval_pred):
        predictions, labels = eval_pred
        true = np.abs(predictions - labels) <= self.thresh
        return {"accuracy": true.astype(np.float32).mean().item()}


class AlmostAccuracyBatched:
    def __init__(self, thresh=0.25):
        self.thresh = thresh
        self.batch_acc = []

    def __call__(self, eval_pred, compute_result):
        predictions, labels = eval_pred
        if isinstance(predictions, tuple):
            predictions = predictions[0]
        if isinstance(labels, tuple):
            labels = labels[0]
        batch_size = len(predictions)
        true = torch.abs(predictions - labels) <= self.thresh
        acc = true.type(torch.FloatTensor).mean().item()
        self.batch_acc.extend([acc] * batch_size)
        if compute_result:
            result = {"accuracy": np.mean(self.batch_acc).item()}
            self.batch_acc = []
            return result


class RegressionModelConfig(PretrainedConfig):
    def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs):
        super().__init__(**kwargs)
        self.a = a
        self.b = b
        self.double_output = double_output
        self.random_torch = random_torch
        self.hidden_size = 1


if is_torch_available():

    class SampleIterableDataset(IterableDataset):
        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names)

        def __iter__(self):
            for i in range(len(self.dataset)):
                yield self.dataset[i]

    class FiniteIterableDataset(SampleIterableDataset):
        def __init__(self, a=2, b=3, length=64, seed=42, label_names=None):
            super().__init__(a, b, length, seed, label_names)
            self.current_sample = 0

        def __iter__(self):
            while self.current_sample < len(self.dataset):
                yield self.dataset[self.current_sample]
                self.current_sample += 1

    class MultiLoader:
        def __init__(self, loaders):
            self.loaders = loaders

        def __len__(self):
            return sum(len(loader) for loader in self.loaders)

        def __iter__(self):
            for loader in self.loaders:
                yield from loader

    class CustomDataloaderTrainer(Trainer):
        def get_train_dataloader(self):
            dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()]
            return MultiLoader(dataloaders)

        def get_eval_dataloader(self, eval_dataset):
            dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)]
            return MultiLoader(dataloaders)

    class RegressionModel(nn.Module):
        def __init__(self, a=0, b=0, double_output=False):
            super().__init__()
            self.a = nn.Parameter(torch.tensor(a).float())
            self.b = nn.Parameter(torch.tensor(b).float())
            self.double_output = double_output
            self.config = None

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if labels is None:
                return (y, y) if self.double_output else (y,)
            loss = nn.functional.mse_loss(y, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    class RegressionDictModel(nn.Module):
        def __init__(self, a=0, b=0):
            super().__init__()
            self.a = nn.Parameter(torch.tensor(a).float())
            self.b = nn.Parameter(torch.tensor(b).float())
            self.config = None

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x * self.a + self.b
            result = {"output": y}
            if labels is not None:
                result["loss"] = nn.functional.mse_loss(y, labels)
            return result

    class RegressionPreTrainedModel(PreTrainedModel):
        config_class = RegressionModelConfig
        base_model_prefix = "regression"

        def __init__(self, config):
            super().__init__(config)
            self.a = nn.Parameter(torch.tensor(config.a).float())
            self.b = nn.Parameter(torch.tensor(config.b).float())
            self.double_output = config.double_output

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if labels is None:
                return (y, y) if self.double_output else (y,)
            loss = nn.functional.mse_loss(y, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel):
        config_class = RegressionModelConfig
        base_model_prefix = "regression"
        supports_gradient_checkpointing = True

        def __init__(self, config):
            super().__init__(config)
            self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)])
            self.head = nn.Linear(config.hidden_size, 1)
            self.gradient_checkpointing = False
            self.double_output = config.double_output

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x.unsqueeze(0)

            for layer in self.layers:
                if self.training and self.gradient_checkpointing:
                    outputs = self._gradient_checkpointing_func(layer.__call__, y)
                else:
                    outputs = layer(y)
                y = outputs * 3

            logits = self.head(y)

            if labels is None:
                return (logits, logits) if self.double_output else (logits,)

            loss = nn.functional.mse_loss(logits, labels)
            return (loss, y, y) if self.double_output else (loss, y)

    class RegressionRandomPreTrainedModel(PreTrainedModel):
        config_class = RegressionModelConfig
        base_model_prefix = "regression"

        def __init__(self, config):
            super().__init__(config)
            self.a = nn.Parameter(torch.tensor(config.a).float())
            self.b = nn.Parameter(torch.tensor(config.b).float())
            self.random_torch = config.random_torch

        def forward(self, input_x, labels=None, **kwargs):
            y = input_x * self.a + self.b
            if self.random_torch:
                torch_rand = torch.randn(1).squeeze()
            np_rand = np.random.rand()
            rand_rand = random.random()

            if self.random_torch:
                y += 0.05 * torch_rand
            y += 0.05 * torch.tensor(np_rand + rand_rand)

            if labels is None:
                return (y,)
            loss = nn.functional.mse_loss(y, labels)
            return (loss, y)

    class TstLayer(nn.Module):
        def __init__(self, hidden_size):
            super().__init__()
            self.linear1 = nn.Linear(hidden_size, hidden_size)
            self.ln1 = nn.LayerNorm(hidden_size)
            self.linear2 = nn.Linear(hidden_size, hidden_size)
            self.ln2 = nn.LayerNorm(hidden_size)
            self.bias = nn.Parameter(torch.zeros(hidden_size))

        def forward(self, x):
            h = self.ln1(nn.functional.relu(self.linear1(x)))
            h = nn.functional.relu(self.linear2(x))
            return self.ln2(x + h + self.bias)

    def get_regression_trainer(
        a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, **kwargs
    ):
        label_names = kwargs.get("label_names", None)
        gradient_checkpointing = kwargs.get("gradient_checkpointing", False)
        train_dataset = RegressionDataset(length=train_len, label_names=label_names)
        eval_dataset = RegressionDataset(length=eval_len, label_names=label_names)

        model_init = kwargs.pop("model_init", None)
        if model_init is not None:
            model = None
        else:
            if pretrained:
                config = RegressionModelConfig(a=a, b=b, double_output=double_output)
                # We infer the correct model class depending on whether gradient_checkpointing is used
                target_cls = (
                    RegressionPreTrainedModel
                    if not gradient_checkpointing
                    else RegressionPreTrainedModelWithGradientCheckpointing
                )
                model = target_cls(config)
            else:
                model = RegressionModel(a=a, b=b, double_output=double_output)

        compute_metrics = kwargs.pop("compute_metrics", None)
kwargs.pop("data_collator", None) optimizers = kwargs.pop("optimizers", (None, None)) output_dir = kwargs.pop("output_dir", "./regression") preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None) args = RegressionTrainingArguments(output_dir, a=a, b=b, keep_report_to=keep_report_to, **kwargs) return Trainer( model, args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, optimizers=optimizers, model_init=model_init, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) class TrainerIntegrationCommon: def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True, safe_weights=True): weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"] if is_pretrained: file_list.append("config.json") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint)) for filename in file_list: self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename))) def check_best_model_has_been_loaded( self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True ): checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}") log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history values = [d[metric] for d in log_history] best_value = max(values) if greater_is_better else min(values) best_checkpoint = (values.index(best_value) + 1) * freq checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}") if is_pretrained: best_model = RegressionPreTrainedModel.from_pretrained(checkpoint) best_model.to(trainer.args.device) else: best_model = RegressionModel() if not safe_weights: state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME)) else: state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME)) best_model.load_state_dict(state_dict) best_model.to(trainer.args.device) self.assertTrue(torch.allclose(best_model.a, trainer.model.a)) self.assertTrue(torch.allclose(best_model.b, trainer.model.b)) metrics = trainer.evaluate() self.assertEqual(metrics[metric], best_value) def check_trainer_state_are_the_same(self, trainer_state, trainer_state1): # We'll pop things so operate on copies. state = trainer_state.copy() state1 = trainer_state1.copy() # Log history main contain different logs for the time metrics (after resuming a training). log_history = state.pop("log_history", None) log_history1 = state1.pop("log_history", None) self.assertEqual(state, state1) skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"] for log, log1 in zip(log_history, log_history1): for key in skip_log_keys: _ = log.pop(key, None) _ = log1.pop(key, None) self.assertEqual(log, log1) def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True): # Converts a checkpoint of a regression model to a sharded checkpoint. 
    def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True):
        # Converts a checkpoint of a regression model to a sharded checkpoint.
        if load_safe:
            loader = safetensors.torch.load_file
            weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME)
        else:
            loader = torch.load
            weights_file = os.path.join(folder, WEIGHTS_NAME)

        if save_safe:
            extension = "safetensors"
            saver = safetensors.torch.save_file
            index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME)
            shard_name = SAFE_WEIGHTS_NAME
        else:
            extension = "bin"
            saver = torch.save
            index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
            shard_name = WEIGHTS_NAME

        state_dict = loader(weights_file)

        os.remove(weights_file)
        keys = list(state_dict.keys())

        shard_files = [
            shard_name.replace(f".{extension}", f"-{idx+1:05d}-of-{len(keys):05d}.{extension}")
            for idx in range(len(keys))
        ]
        index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}}

        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

        for param_name, shard_file in zip(keys, shard_files):
            saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file))


@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon):
    """
    Only tests that want to tap into the auto-pre-run 2 trainings:
    - self.default_trained_model
    - self.alternate_trained_model
    directly, or via check_trained_model
    """

    def setUp(self):
        super().setUp()
        args = TrainingArguments("..")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.default_trained_model = (trainer.model.a, trainer.model.b)

        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.alternate_trained_model = (trainer.model.a, trainer.model.b)

    def check_trained_model(self, model, alternate_seed=False):
        # Checks a training seeded with learning_rate = 0.1
        (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model
        self.assertTrue(torch.allclose(model.a, a))
        self.assertTrue(torch.allclose(model.b, b))

    def test_reproducible_training(self):
        # Checks that training worked, model trained and seed made a reproducible training.
        trainer = get_regression_trainer(learning_rate=0.1)
        trainer.train()
        self.check_trained_model(trainer.model)

        # Checks that a different seed gets different (reproducible) results.
        trainer = get_regression_trainer(learning_rate=0.1, seed=314)
        trainer.train()
        self.check_trained_model(trainer.model, alternate_seed=True)

    def test_trainer_with_datasets(self):
        import datasets

        np.random.seed(42)
        x = np.random.normal(size=(64,)).astype(np.float32)
        y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,)).astype(np.float32)
        train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y})

        # Base training. Should have the same results as test_reproducible_training
        model = RegressionModel()
        args = TrainingArguments("./regression", learning_rate=0.1, report_to="none")
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.train()
        self.check_trained_model(trainer.model)
train_dataset.set_format(type="torch", dtype=torch.float32) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Adding one column not used by the model should have no impact z = np.random.normal(size=(64,)).astype(np.float32) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z}) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) def test_model_init(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression", learning_rate=0.1, report_to="none") trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel()) trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results. trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results and new seed should be used. trainer.args.seed = 314 trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_gradient_accumulation(self): # Training with half the batch size but accumulation steps as 2 should give the same results. trainer = get_regression_trainer( gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1 ) trainer.train() self.check_trained_model(trainer.model) def test_gradient_checkpointing(self): trainer = get_regression_trainer( per_device_train_batch_size=1, learning_rate=0.1, gradient_checkpointing=True, gradient_checkpointing_kwargs={"use_reentrant": False}, ) previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()} trainer.train() # Check if model weights have been updated for k, v in trainer.model.named_parameters(): self.assertFalse( torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4), f"Model weights for {k} have not been updated", ) def test_training_loss(self): n_gpus = max(1, backend_device_count(torch_device)) # With even logs trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus)) trainer.train() log_history = trainer.state.log_history losses = [log["loss"] for log in log_history if "loss" in log] train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4) # With uneven logs trainer = get_regression_trainer(logging_steps=5) trainer.train() log_history = trainer.state.log_history # Training loss should be the same as before new_train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(train_loss, new_train_loss, places=4) def test_custom_optimizer(self): train_dataset = RegressionDataset() args = TrainingArguments("./regression", report_to="none") model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0) def test_lr_scheduler_kwargs(self): # test scheduler kwargs passed via TrainingArguments train_dataset = RegressionDataset() model = RegressionModel() num_steps, num_warmup_steps = 10, 2 extra_kwargs = {"power": 5.0, "lr_end": 1e-5} # Non-default 
    def test_lr_scheduler_kwargs(self):
        # test scheduler kwargs passed via TrainingArguments
        train_dataset = RegressionDataset()
        model = RegressionModel()
        num_steps, num_warmup_steps = 10, 2
        extra_kwargs = {"power": 5.0, "lr_end": 1e-5}  # Non-default arguments
        args = TrainingArguments(
            "./regression",
            lr_scheduler_type="polynomial",
            lr_scheduler_kwargs=extra_kwargs,
            learning_rate=0.2,
            warmup_steps=num_warmup_steps,
            report_to="none",
        )
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.create_optimizer_and_scheduler(num_training_steps=num_steps)

        # Checking that the scheduler was created
        self.assertIsNotNone(trainer.lr_scheduler)

        # Checking that the correct args were passed
        sched1 = trainer.lr_scheduler
        sched2 = get_polynomial_decay_schedule_with_warmup(
            trainer.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_steps, **extra_kwargs
        )
        self.assertEqual(sched1.lr_lambdas[0].args, sched2.lr_lambdas[0].args)
        self.assertEqual(sched1.lr_lambdas[0].keywords, sched2.lr_lambdas[0].keywords)

    def test_cosine_with_min_lr_scheduler(self):
        train_dataset = RegressionDataset()
        model = RegressionModel()
        num_steps, num_warmup_steps = 10, 2
        extra_kwargs = {"min_lr": 1e-5}  # Non-default arguments
        args = TrainingArguments(
            "./regression",
            lr_scheduler_type="cosine_with_min_lr",
            lr_scheduler_kwargs=extra_kwargs,
            learning_rate=0.2,
            warmup_steps=num_warmup_steps,
            report_to="none",
        )
        trainer = Trainer(model, args, train_dataset=train_dataset)
        trainer.create_optimizer_and_scheduler(num_training_steps=num_steps)

        # Checking that the scheduler was created
        self.assertIsNotNone(trainer.lr_scheduler)

        # Check the last learning rate
        for _ in range(num_steps):
            trainer.lr_scheduler.step()
        self.assertEqual(trainer.lr_scheduler.get_last_lr()[0], 1e-5)

    def test_reduce_lr_on_plateau_args(self):
        # test passed arguments for a custom ReduceLROnPlateau scheduler
        train_dataset = RegressionDataset(length=64)
        eval_dataset = RegressionDataset(length=64)
        args = TrainingArguments(
            "./regression",
            eval_strategy="epoch",
            metric_for_best_model="eval_loss",
            report_to="none",
        )
        model = RegressionModel()
        optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
        lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2)
        trainer = Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, optimizers=(optimizer, lr_scheduler)
        )
        trainer.train()

        self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)
        self.assertEqual(trainer.lr_scheduler.factor, 0.2)
        self.assertEqual(trainer.lr_scheduler.patience, 5)
        self.assertEqual(trainer.lr_scheduler.cooldown, 2)

    def test_reduce_lr_on_plateau(self):
        # test the ReduceLROnPlateau scheduler

        class TrainerWithLRLogs(Trainer):
            def log(self, logs):
                # the LR is computed after metrics and does not exist for the first epoch
                if hasattr(self.lr_scheduler, "_last_lr"):
                    logs["learning_rate"] = self.lr_scheduler._last_lr[0]
                super().log(logs)

        train_dataset = RegressionDataset(length=64)
        eval_dataset = RegressionDataset(length=64)

        args = TrainingArguments(
            "./regression",
            lr_scheduler_type="reduce_lr_on_plateau",
            eval_strategy="epoch",
            metric_for_best_model="eval_loss",
            num_train_epochs=10,
            learning_rate=0.2,
            report_to="none",
        )
        model = RegressionModel()
        trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
        trainer.train()

        self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau)
        patience = trainer.lr_scheduler.patience

        logs = trainer.state.log_history[1:]
        best_loss = logs[0]["eval_loss"]
        bad_epochs = 0
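        # The loop below mirrors ReduceLROnPlateau's bookkeeping: once `eval_loss` has failed to
        # improve for more than `patience` epochs, the next logged learning rate must be lower;
        # otherwise it must stay unchanged.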
        for i, log in enumerate(logs[:-1]):
            # Compare learning rate to next epoch's
            loss = log["eval_loss"]
            just_decreased = False
            if loss > best_loss:
                bad_epochs += 1
                if bad_epochs > patience:
                    self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"])
                    just_decreased = True
                    bad_epochs = 0
            else:
                best_loss = loss
                bad_epochs = 0
            if not just_decreased:
                self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"])

    def test_adafactor_lr_none(self):
        # test the special case where lr=None, since Trainer can't run without an lr_scheduler
        from transformers.optimization import Adafactor, AdafactorSchedule

        train_dataset = RegressionDataset()
        args = TrainingArguments("./regression", report_to="none")
        model = RegressionModel()
        optimizer = Adafactor(model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None)
        lr_scheduler = AdafactorSchedule(optimizer)
        trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler))
        trainer.train()

        (a, b) = self.default_trained_model
        self.assertFalse(torch.allclose(trainer.model.a, a))
        self.assertFalse(torch.allclose(trainer.model.b, b))
        self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0)

    @require_torch_accelerator
    @require_torch_bf16
    def test_mixed_bf16(self):
        # very basic test
        trainer = get_regression_trainer(learning_rate=0.1, bf16=True)
        trainer.train()
        self.check_trained_model(trainer.model)

        # --bf16 --half_precision_backend apex can't be used together
        with self.assertRaises(ValueError):
            trainer = get_regression_trainer(learning_rate=0.1, bf16=True, half_precision_backend="apex")

        # will add more specific tests once there are some bugs to fix

    @require_torch_gpu
    @require_torch_tf32
    def test_tf32(self):
        # very basic test
        trainer = get_regression_trainer(learning_rate=0.1, tf32=True)
        trainer.train()
        self.check_trained_model(trainer.model)


@require_torch
@require_sentencepiece
@require_tokenizers
class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon):
    def setUp(self):
        super().setUp()
        args = TrainingArguments("..")
        self.n_epochs = args.num_train_epochs
        self.batch_size = args.train_batch_size

    def test_trainer_works_with_dict(self):
        # Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break
        # anything.
        train_dataset = RegressionDataset()
        eval_dataset = RegressionDataset()
        model = RegressionDictModel()
        args = TrainingArguments("./regression", report_to="none")
        trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
        trainer.train()
        _ = trainer.evaluate()
        _ = trainer.predict(eval_dataset)

    def test_evaluation_with_keys_to_drop(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        eval_dataset = RepeatDataset(x)
        args = TrainingArguments("./test", report_to="none")
        trainer = Trainer(tiny_gpt2, args, eval_dataset=eval_dataset)
        # By default the past_key_values are removed
        result = trainer.predict(eval_dataset)
        self.assertTrue(isinstance(result.predictions, np.ndarray))
        # We can still get them by setting ignore_keys to []
        result = trainer.predict(eval_dataset, ignore_keys=[])
        self.assertTrue(isinstance(result.predictions, tuple))
        self.assertEqual(len(result.predictions), 2)

    def test_training_arguments_are_left_untouched(self):
        trainer = get_regression_trainer()
        trainer.train()
        args = TrainingArguments("./regression", report_to=[])
        dict1, dict2 = args.to_dict(), trainer.args.to_dict()
if key != "logging_dir": self.assertEqual(dict1[key], dict2[key]) def test_number_of_steps_in_training(self): # Regular training has n_epochs * len(train_dl) steps trainer = get_regression_trainer(learning_rate=0.1) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer(learning_rate=0.1, max_steps=10) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @require_torch_bf16 @require_intel_extension_for_pytorch def test_number_of_steps_in_training_with_ipex(self): for mix_bf16 in [True, False]: # Regular training has n_epochs * len(train_dl) steps trainer = get_regression_trainer(learning_rate=0.1, use_ipex=True, bf16=mix_bf16, use_cpu=True) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / trainer.args.train_batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer( learning_rate=0.1, num_train_epochs=1.5, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / trainer.args.train_batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer( learning_rate=0.1, max_steps=10, use_ipex=True, bf16=mix_bf16, use_cpu=True ) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @require_peft @require_bitsandbytes def test_bnb_compile(self): from peft import LoraConfig, get_peft_model # Simply tests if initializing a Trainer with a PEFT + compiled model works out of the box # QLoRA + torch compile is not really supported yet, but we should at least support the model # loading and let torch throw the tiny_model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-LlamaForCausalLM", load_in_4bit=True ) peft_config = LoraConfig( r=8, lora_alpha=32, target_modules=["q_proj", "k_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) tiny_model = get_peft_model(tiny_model, peft_config) tiny_model = torch.compile(tiny_model) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, learning_rate=1e-9, logging_steps=5, ) with self.assertRaises(ValueError): _ = Trainer(tiny_model, args, train_dataset=train_dataset) # noqa @require_peft def test_multiple_peft_adapters(self): from peft import LoraConfig, get_peft_model # Tests if resuming from checkpoint works if the model has multiple adapters MODEL_ID = "hf-internal-testing/tiny-random-LlamaForCausalLM" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) tiny_model = AutoModelForCausalLM.from_pretrained(MODEL_ID) peft_config = LoraConfig( r=4, lora_alpha=16, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) tiny_model = get_peft_model(tiny_model, peft_config, "adapter1") tiny_model.add_adapter("adapter2", peft_config) train_dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) for example in train_dataset.examples: example["labels"] = example["input_ids"] tokenizer.pad_token = tokenizer.eos_token with 
        with tempfile.TemporaryDirectory() as tmpdir:
            args = TrainingArguments(
                tmpdir,
                per_device_train_batch_size=1,
                learning_rate=1e-9,
                save_steps=5,
                logging_steps=5,
                max_steps=10,
                use_cpu=True,
            )
            trainer = Trainer(tiny_model, args, tokenizer=tokenizer, train_dataset=train_dataset)

            trainer.train()
            parameters = dict(tiny_model.named_parameters())
            state = dataclasses.asdict(trainer.state)

            # Reinitialize trainer
            trainer = Trainer(tiny_model, args, tokenizer=tokenizer, train_dataset=train_dataset)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")

            trainer.train(resume_from_checkpoint=checkpoint)
            parameters1 = dict(tiny_model.named_parameters())
            state1 = dataclasses.asdict(trainer.state)

            self.assertEqual(parameters, parameters1)
            self.check_trainer_state_are_the_same(state, state1)

    @require_bitsandbytes
    def test_rmsprop_bnb(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir, learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="rmsprop_bnb"
            )
            trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)

            # Check that it trains without errors
            trainer.train()

    @require_bitsandbytes
    def test_rmsprop_bnb_8bit(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir, learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="rmsprop_bnb_8bit"
            )
            trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)

            # Check that it trains without errors
            trainer.train()

    @require_bitsandbytes
    def test_rmsprop_bnb_32bit(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir, learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="rmsprop_bnb_32bit"
            )
            trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)

            # Check that it trains without errors
            trainer.train()

    def test_neftune(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        # Trainer without inf/nan filter
        args = TrainingArguments(
            "./test",
            learning_rate=1e-9,
            logging_steps=5,
            logging_nan_inf_filter=False,
            neftune_noise_alpha=0.4,
            report_to="none",
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)

        trainer.model = trainer._activate_neftune(trainer.model)

        dummy_input = torch.LongTensor([[1, 0, 1]]).to(torch_device)

        emb1 = trainer.model.get_input_embeddings()(dummy_input)
        emb2 = trainer.model.get_input_embeddings()(dummy_input)

        self.assertFalse(torch.allclose(emb1, emb2), "Neftune noise is not applied!")

        # redefine the model
        tiny_gpt2 = GPT2LMHeadModel(config)
        # Trainer without inf/nan filter
        args = TrainingArguments(
            "./test",
            learning_rate=1e-9,
            logging_steps=5,
            logging_nan_inf_filter=False,
            neftune_noise_alpha=0.4,
            report_to="none",
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
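        # Note: NEFTune works by registering a forward hook on the input embeddings that adds
        # noise scaled by `neftune_noise_alpha` during training only; the assertions below rely
        # on that hook being removed once training finishes. (Summary of the mechanism, stated
        # here for context rather than tested directly.)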
        # Check that it trains without errors
        trainer.train()

        # Make sure forward pass works fine
        _ = trainer.model(dummy_input)
        self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0)

        trainer.model.eval()

        # Check that we get identical embeddings just in case
        emb1 = trainer.model.get_input_embeddings()(dummy_input)
        emb2 = trainer.model.get_input_embeddings()(dummy_input)

        self.assertTrue(torch.allclose(emb1, emb2), "Neftune noise is still applied!")

    def test_logging_inf_nan_filter(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        # Trainer without inf/nan filter
        args = TrainingArguments(
            "./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=False, report_to="none"
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
        trainer.train()
        log_history_no_filter = trainer.state.log_history

        # Trainer with inf/nan filter
        args = TrainingArguments(
            "./test", learning_rate=1e9, logging_steps=5, logging_nan_inf_filter=True, report_to="none"
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
        trainer.train()
        log_history_filter = trainer.state.log_history

        def is_any_loss_nan_or_inf(log_history):
            losses = [l["loss"] for l in log_history[:-1]]
            return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses)

        self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter))
        self.assertFalse(is_any_loss_nan_or_inf(log_history_filter))

    def test_train_and_eval_dataloaders(self):
        if torch_device == "cuda":
            n_gpu = max(1, backend_device_count(torch_device))
        else:
            n_gpu = 1
        trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16)
        self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu)
        trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16)
        self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16 * n_gpu)

        # Check drop_last works
        trainer = get_regression_trainer(
            train_len=66, eval_len=74, learning_rate=0.1, per_device_train_batch_size=16, per_device_eval_batch_size=32
        )
        self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)
        self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)

        trainer = get_regression_trainer(
            train_len=66,
            eval_len=74,
            learning_rate=0.1,
            per_device_train_batch_size=16,
            per_device_eval_batch_size=32,
            dataloader_drop_last=True,
        )
        self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))
        self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu))

        # Check passing a new dataset for evaluation works
        new_eval_dataset = RegressionDataset(length=128)
        self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu))

    # tests that we do not require dataloader to have a .dataset attribute
    def test_dataloader_without_dataset(self):
        train_dataset = RegressionDataset(length=128)
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = CustomDataloaderTrainer(
                model=RegressionModel(),
                train_dataset=train_dataset,
                eval_dataset=train_dataset,
                args=TrainingArguments(output_dir=tmp_dir, report_to="none"),
            )

            trainer.train()
            trainer.evaluate()

    def test_get_eval_dataloader_without_persistent_workers(self):
        train_dataset = RegressionDataset()
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        args = TrainingArguments("./test", report_to="none", dataloader_persistent_workers=False)

        # Single evaluation dataset
        eval_dataset = RegressionDataset()
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
        # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader
        trainer.accelerator.prepare = lambda x: x

        default_dataloader = trainer.get_eval_dataloader()
        dataloader_with_dataset = trainer.get_eval_dataloader(eval_dataset)

        self.assertEqual(default_dataloader.dataset, eval_dataset)
        self.assertEqual(dataloader_with_dataset.dataset, eval_dataset)
        self.assertNotEqual(default_dataloader, dataloader_with_dataset)

        # Multiple evaluation datasets
        first_dataset = RegressionDataset()
        second_dataset = RegressionDataset()
        trainer = Trainer(
            tiny_gpt2,
            args,
            train_dataset=train_dataset,
            eval_dataset={"first": first_dataset, "second": second_dataset},
        )
        # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader
        trainer.accelerator.prepare = lambda x: x

        first_dataloader = trainer.get_eval_dataloader("first")
        first_dataloader_repeated = trainer.get_eval_dataloader("first")
        second_dataloader = trainer.get_eval_dataloader("second")
        second_dataloader_repeated = trainer.get_eval_dataloader("second")

        self.assertEqual(first_dataset, first_dataloader.dataset)
        self.assertEqual(first_dataloader.dataset, first_dataloader_repeated.dataset)
        self.assertEqual(second_dataset, second_dataloader.dataset)
        self.assertEqual(second_dataloader.dataset, second_dataloader_repeated.dataset)
        self.assertNotEqual(first_dataloader, first_dataloader_repeated)
        self.assertNotEqual(second_dataloader, second_dataloader_repeated)

    def test_get_eval_dataloader_with_persistent_workers(self):
        train_dataset = RegressionDataset()
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        args = TrainingArguments(
            "./test",
            report_to="none",
            dataloader_persistent_workers=True,
            dataloader_num_workers=2,
        )

        # Single evaluation dataset
        eval_dataset = RegressionDataset()
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset, eval_dataset=eval_dataset)
        # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader
        trainer.accelerator.prepare = lambda x: x

        default_dataloader = trainer.get_eval_dataloader()
        dataloader_with_dataset = trainer.get_eval_dataloader(eval_dataset)

        self.assertEqual(default_dataloader.dataset, eval_dataset)
        self.assertEqual(dataloader_with_dataset.dataset, eval_dataset)
        self.assertEqual(default_dataloader, dataloader_with_dataset)

        # Multiple evaluation datasets
        first_dataset = RegressionDataset()
        second_dataset = RegressionDataset()
        trainer = Trainer(
            tiny_gpt2,
            args,
            train_dataset=train_dataset,
            eval_dataset={"first": first_dataset, "second": second_dataset},
        )
        # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader
        trainer.accelerator.prepare = lambda x: x

        first_dataloader = trainer.get_eval_dataloader("first")
        first_dataloader_repeated = trainer.get_eval_dataloader("first")
        second_dataloader = trainer.get_eval_dataloader("second")
        second_dataloader_repeated = trainer.get_eval_dataloader("second")

        self.assertEqual(first_dataset, first_dataloader.dataset)
        self.assertEqual(first_dataloader.dataset, first_dataloader_repeated.dataset)
        self.assertEqual(second_dataset, second_dataloader.dataset)
        self.assertEqual(second_dataloader.dataset, second_dataloader_repeated.dataset)
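        # With `dataloader_persistent_workers=True` the Trainer caches the eval dataloader and
        # hands back the very same object on repeated calls (keeping worker processes alive),
        # hence the equality checks below, in contrast with the inequality checks in the
        # non-persistent test above.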
        self.assertEqual(first_dataloader, first_dataloader_repeated)
        self.assertEqual(second_dataloader, second_dataloader_repeated)

    @require_liger_kernel
    def test_use_liger_kernel_patching(self):
        # Test that the model code actually gets patched with Liger kernel
        from liger_kernel.transformers.rms_norm import LigerRMSNorm

        from transformers.models.llama import modeling_llama

        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        args = TrainingArguments(
            "./test",
            use_liger_kernel=True,
        )
        Trainer(tiny_llama, args)

        # Check that one of the Llama model layers has been correctly patched with Liger kernel
        self.assertEqual(modeling_llama.LlamaRMSNorm, LigerRMSNorm)

    @require_liger_kernel
    @require_torch_gpu
    def test_use_liger_kernel_trainer(self):
        # Check that trainer still works with liger kernel applied
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            args = TrainingArguments(tmpdir, learning_rate=1e-2, logging_steps=5, max_steps=20, use_liger_kernel=True)
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    @require_lomo
    @require_torch_gpu
    def test_lomo(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        previous_params = {n: p.clone() for n, p in tiny_llama.named_parameters()}

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(tmpdir, learning_rate=1e-2, logging_steps=5, optim="lomo", max_steps=20)
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

        for name, param in tiny_llama.named_parameters():
            self.assertFalse(torch.allclose(param, previous_params[name].to(param.device), rtol=1e-12, atol=1e-12))

    @require_lomo
    @require_torch_gpu
    def test_adalomo(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="adalomo",
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    @require_grokadamw
    @require_torch_gpu
    def test_grokadamw(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=2e-5,
                logging_steps=5,
                optim="grokadamw",
                max_steps=20,
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    def test_galore_matched_modules(self):
        regex_patterns = [r".*.attn.*", r".*.mlp.*"]

        module_names = [
            "model.transformer.h.0.ln_1",
            "model.transformer.h.0.attn.q_proj",
            "model.lm_head",
            "model.transformer.h.0.mlp.up_proj",
        ]
        expected_values = [False, True, False, True]
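        # As exercised by the assertions below, `check_target_module_exists` returns a
        # (matched, used_regex) pair: regex patterns such as r".*.attn.*" match any qualified
        # module name containing "attn", while plain strings only match exact names or name
        # components (a summary inferred from these test cases, not a full specification).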
        for expected_value, module_name in zip(expected_values, module_names):
            is_module_matched, is_regex = check_target_module_exists(regex_patterns, module_name, return_is_regex=True)
            self.assertTrue(is_module_matched == expected_value)
            if is_module_matched:
                self.assertTrue(is_regex)

        exact_patterns = ["q_proj", "up_proj"]

        module_names = [
            "model.transformer.h.0.ln_1",
            "model.transformer.h.0.attn.q_proj",
            "model.lm_head",
            "model.transformer.h.0.mlp.up_proj",
        ]
        expected_values = [False, True, False, True]

        for expected_value, module_name in zip(expected_values, module_names):
            is_module_matched, is_regex = check_target_module_exists(exact_patterns, module_name, return_is_regex=True)
            self.assertTrue(is_module_matched == expected_value)
            if is_module_matched:
                self.assertFalse(is_regex)

        simple_regex = r".*.attn.*"

        module_names = [
            "model.transformer.h.0.ln_1",
            "model.transformer.h.0.attn.q_proj",
            "model.lm_head",
            "model.transformer.h.0.mlp.up_proj",
        ]
        expected_values = [False, True, False, False]

        for expected_value, module_name in zip(expected_values, module_names):
            is_module_matched, is_regex = check_target_module_exists(simple_regex, module_name, return_is_regex=True)
            self.assertTrue(is_module_matched == expected_value)
            if is_module_matched:
                self.assertTrue(is_regex)

        simple_regex = "model.transformer.h.0.attn.q_proj"

        module_names = [
            "model.transformer.h.0.ln_1",
            "model.transformer.h.0.attn.q_proj",
            "model.lm_head",
            "model.transformer.h.0.mlp.up_proj",
        ]
        expected_values = [False, True, False, False]

        for expected_value, module_name in zip(expected_values, module_names):
            is_module_matched, is_regex = check_target_module_exists(simple_regex, module_name, return_is_regex=True)
            self.assertTrue(is_module_matched == expected_value)
            if is_module_matched:
                self.assertFalse(is_regex)

        target_modules = ["attn", "mlp"]

        module_names = [
            "model.transformer.h.0.ln_1",
            "model.transformer.h.0.attn.q_proj",
            "model.lm_head",
            "model.transformer.h.0.mlp.up_proj",
        ]
        expected_values = [False, True, False, True]

        for expected_value, module_name in zip(expected_values, module_names):
            is_module_matched, is_regex = check_target_module_exists(target_modules, module_name, return_is_regex=True)
            self.assertTrue(is_module_matched == expected_value)
            if is_module_matched:
                self.assertFalse(is_regex)

    @require_galore_torch
    @require_torch_gpu
    def test_galore(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="galore_adamw",
                optim_target_modules=[r".*attn.*", r".*mlp.*"],
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    @require_galore_torch
    @require_torch_gpu
    def test_galore_extra_args(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="galore_adamw",
                optim_args="rank=64, update_proj_gap=100, scale=0.10",
                optim_target_modules=[r".*attn.*", r".*mlp.*"],
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()
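    # As the test above shows, GaLore-specific hyper-parameters can also be passed through
    # `optim_args` as a comma-separated string, e.g. "rank=64, update_proj_gap=100, scale=0.10",
    # while `optim_target_modules` selects which matrices receive the low-rank projection.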
    @require_galore_torch
    @require_torch_gpu
    def test_galore_layerwise(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="galore_adamw_layerwise",
                optim_target_modules=[r".*attn.*", r".*mlp.*"],
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    @require_galore_torch
    @require_torch_gpu
    def test_galore_layerwise_with_scheduler(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="galore_adamw_layerwise",
                lr_scheduler_type="cosine",
                optim_target_modules=[r".*attn.*", r".*mlp.*"],
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    @require_galore_torch
    @require_torch_gpu
    def test_galore_adamw_8bit(self):
        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="galore_adamw_8bit",
                optim_target_modules=[r".*attn.*", r".*mlp.*"],
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

    @require_galore_torch
    @require_torch_gpu
    def test_galore_adafactor(self):
        # These are the intervals of the peak memory usage of training such a tiny model
        # if the peak memory goes outside that range, then we know there might be a bug somewhere
        upper_bound_pm = 700
        lower_bound_pm = 650

        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir, TorchTracemalloc() as tracemalloc:
            # Trainer without inf/nan filter
            args = TrainingArguments(
                tmpdir,
                learning_rate=1e-9,
                logging_steps=5,
                optim="galore_adafactor",
                optim_target_modules=[r".*attn.*", r".*mlp.*"],
            )
            trainer = Trainer(tiny_llama, args, train_dataset=train_dataset)

            # Check this works
            _ = trainer.train()

        galore_peak_memory = tracemalloc.peaked + bytes2megabytes(tracemalloc.begin)
        self.assertTrue(galore_peak_memory < upper_bound_pm)
        self.assertTrue(lower_bound_pm < galore_peak_memory)

    @require_galore_torch
    @require_torch_gpu
    def test_galore_adafactor_attention_only(self):
        # These are the intervals of the peak memory usage of training such a tiny model
        # if the peak memory goes outside that range, then we know there might be a bug somewhere
        upper_bound_pm = 700
        lower_bound_pm = 650

        config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4)
        tiny_llama = LlamaForCausalLM(config)

        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        with tempfile.TemporaryDirectory() as tmpdir, TorchTracemalloc() as tracemalloc:
            # Trainer without inf/nan filter
"k_proj", "v_proj"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() galore_peak_memory = tracemalloc.peaked + bytes2megabytes(tracemalloc.begin) self.assertTrue(galore_peak_memory < upper_bound_pm) self.assertTrue(lower_bound_pm < galore_peak_memory) @require_galore_torch @require_torch_gpu def test_galore_adafactor_all_linear(self): # These are the intervals of the peak memory usage of training such a tiny model # if the peak memory goes outside that range, then we know there might be a bug somewhere upper_bound_pm = 700 lower_bound_pm = 650 config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir, TorchTracemalloc() as tracemalloc: # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=1e-9, logging_steps=5, optim="galore_adafactor", optim_target_modules="all-linear", ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() galore_peak_memory = tracemalloc.peaked + bytes2megabytes(tracemalloc.begin) self.assertTrue(galore_peak_memory < upper_bound_pm) self.assertTrue(lower_bound_pm < galore_peak_memory) @require_galore_torch @require_torch_gpu def test_galore_lr_display_without_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir: learning_rate = 1e-9 num_steps = 10 # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=learning_rate, logging_steps=5, optim="galore_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # reflects displayed lr in trainer self.assertEqual(trainer.get_learning_rates(), [learning_rate, learning_rate]) @require_galore_torch @require_torch_gpu def test_galore_lr_display_with_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir: learning_rate = 2e-4 num_train_epochs = 2 num_warmup_steps = 5 # Trainer without inf/nan filter args = TrainingArguments( tmpdir, num_train_epochs=num_train_epochs, learning_rate=learning_rate, warmup_steps=num_warmup_steps, lr_scheduler_type="cosine", logging_steps=1, optim="galore_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # creating log history of trainer, results don't matter trainer.train() logs = trainer.state.log_history[1:][:-1] # reach given learning rate peak and end with 0 lr self.assertTrue(logs[num_warmup_steps - 2]["learning_rate"] == learning_rate) self.assertTrue(logs[-1]["learning_rate"] == 0) # increasing and decreasing pattern of lrs increasing_lrs = [ logs[i]["learning_rate"] < logs[i + 1]["learning_rate"] for i in range(len(logs)) if i < num_warmup_steps - 2 ] decreasing_lrs = [ logs[i]["learning_rate"] > logs[i + 1]["learning_rate"] for i in range(len(logs) - 1) if i >= num_warmup_steps - 2 ] self.assertTrue(all(increasing_lrs)) 
            self.assertTrue(all(decreasing_lrs))

            # warm up steps << total steps
            self.assertTrue(len(decreasing_lrs) > len(increasing_lrs))

    @require_torch_multi_accelerator
    def test_data_is_not_parallelized_when_model_is_parallel(self):
        model = RegressionModel()
        # Make the Trainer believe it's a parallelized model
        model.is_parallelizable = True
        model.model_parallel = True
        args = TrainingArguments(
            "./regression", per_device_train_batch_size=16, per_device_eval_batch_size=16, report_to="none"
        )
        trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset())
        # Check the Trainer was fooled
        self.assertTrue(trainer.is_model_parallel)
        self.assertEqual(trainer.args.n_gpu, 1)

        # The batch size of the training and evaluation dataloaders should be 16, not 16 * n_gpu
        self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16)
        self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16)
        self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16)
        self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16)

    def test_evaluate(self):
        trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy())
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With a number of elements not a round multiple of the batch size
        trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy())
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With logits preprocess
        trainer = get_regression_trainer(
            a=1.5,
            b=2.5,
            compute_metrics=AlmostAccuracy(),
            preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
        )
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    def test_evaluate_with_batch_eval_metrics(self):
        trainer = get_regression_trainer(
            a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True
        )
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With a number of elements not a round multiple of the batch size
        trainer = get_regression_trainer(
            a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True
        )
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With logits preprocess
        trainer = get_regression_trainer(
            a=1.5,
            b=2.5,
            compute_metrics=AlmostAccuracyBatched(),
            batch_eval_metrics=True,
            preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
        )
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    def test_evaluate_with_jit(self):
        trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True)
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With a number of elements not a round multiple of the batch size
        trainer = get_regression_trainer(
            a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True
        )
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

        # With logits preprocess
        trainer = get_regression_trainer(
            a=1.5,
            b=2.5,
            compute_metrics=AlmostAccuracy(),
            preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
            jit_mode_eval=True,
        )
        results = trainer.evaluate()

        x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
        pred = 1.5 * x + 2.5
        expected_loss = ((pred - y) ** 2).mean()
        self.assertAlmostEqual(results["eval_loss"], expected_loss)
        expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"]
        self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    @require_torch_bf16
    @require_intel_extension_for_pytorch
    def test_evaluate_with_ipex(self):
        for mix_bf16 in [True, False]:
            trainer = get_regression_trainer(
                a=1.5, b=2.5, use_ipex=True, compute_metrics=AlmostAccuracy(), bf16=mix_bf16, use_cpu=True
            )
            results = trainer.evaluate()

            x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
            pred = 1.5 * x + 2.5
            expected_loss = ((pred - y) ** 2).mean()
            self.assertAlmostEqual(results["eval_loss"], expected_loss)
            expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
            self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

            # With a number of elements not a round multiple of the batch size
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                use_ipex=True,
                eval_len=66,
                compute_metrics=AlmostAccuracy(),
                bf16=mix_bf16,
                use_cpu=True,
            )
            results = trainer.evaluate()

            x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
            pred = 1.5 * x + 2.5
            expected_loss = ((pred - y) ** 2).mean()
            self.assertAlmostEqual(results["eval_loss"], expected_loss)
            expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
            self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

            # With logits preprocess
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                use_ipex=True,
                compute_metrics=AlmostAccuracy(),
                preprocess_logits_for_metrics=lambda logits, labels: logits + 1,
                bf16=mix_bf16,
                use_cpu=True,
            )
            results = trainer.evaluate()

            x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0]
            pred = 1.5 * x + 2.5
            expected_loss = ((pred - y) ** 2).mean()
            self.assertAlmostEqual(results["eval_loss"], expected_loss)
1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict(self): trainer = get_regression_trainer(a=1.5, b=2.5) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"]) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_batch_eval_metrics(self): trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True ) results = trainer.predict(trainer.eval_dataset) preds = results.predictions x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] gt = 1.5 * x + 2.5 self.assertTrue(np.allclose(preds, gt)) expected_acc = AlmostAccuracy()((preds, y))["accuracy"] self.assertAlmostEqual(results.metrics["test_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True ) results = trainer.predict(trainer.eval_dataset) preds = results.predictions x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) expected_acc = AlmostAccuracy()((preds, y))["accuracy"] self.assertAlmostEqual(results.metrics["test_accuracy"], expected_acc) # With more than one output of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True ) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_jit(self): trainer = get_regression_trainer(a=1.5, b=2.5, 
    def test_predict_with_jit(self):
        trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True)
        preds = trainer.predict(trainer.eval_dataset).predictions
        x = trainer.eval_dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

        # With a number of elements not a round multiple of the batch size
        trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True)
        preds = trainer.predict(trainer.eval_dataset).predictions
        x = trainer.eval_dataset.x
        self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

        # With more than one output of the model
        trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True)
        preds = trainer.predict(trainer.eval_dataset).predictions
        x = trainer.eval_dataset.x
        self.assertEqual(len(preds), 2)
        self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
        self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))

        # With more than one output/label of the model
        trainer = get_regression_trainer(
            a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], jit_mode_eval=True
        )
        outputs = trainer.predict(trainer.eval_dataset)
        preds = outputs.predictions
        labels = outputs.label_ids
        x = trainer.eval_dataset.x
        self.assertEqual(len(preds), 2)
        self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
        self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
        self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
        self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))

    @require_torch_bf16
    @require_intel_extension_for_pytorch
    def test_predict_with_ipex(self):
        for mix_bf16 in [True, False]:
            trainer = get_regression_trainer(a=1.5, b=2.5, use_ipex=True, bf16=mix_bf16, use_cpu=True)
            preds = trainer.predict(trainer.eval_dataset).predictions
            x = trainer.eval_dataset.x
            self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

            # With a number of elements not a round multiple of the batch size
            trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, use_ipex=True, bf16=mix_bf16, use_cpu=True)
            preds = trainer.predict(trainer.eval_dataset).predictions
            x = trainer.eval_dataset.x
            self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

            # With more than one output of the model
            trainer = get_regression_trainer(
                a=1.5, b=2.5, double_output=True, use_ipex=True, bf16=mix_bf16, use_cpu=True
            )
            preds = trainer.predict(trainer.eval_dataset).predictions
            x = trainer.eval_dataset.x
            self.assertEqual(len(preds), 2)
            self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
            self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))

            # With more than one output/label of the model
            trainer = get_regression_trainer(
                a=1.5,
                b=2.5,
                double_output=True,
                label_names=["labels", "labels_2"],
                use_ipex=True,
                bf16=mix_bf16,
                use_cpu=True,
            )
            outputs = trainer.predict(trainer.eval_dataset)
            preds = outputs.predictions
            labels = outputs.label_ids
            x = trainer.eval_dataset.x
            self.assertEqual(len(preds), 2)
            self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5))
            self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5))
            self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0]))
            self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1]))

    def test_dynamic_shapes(self):
        eval_dataset = DynamicShapesDataset(batch_size=self.batch_size)
        model = RegressionModel(a=2, b=1)
        args = TrainingArguments("./regression", report_to="none")
        trainer = Trainer(model, args, eval_dataset=eval_dataset)

        # Check evaluation can run to completion
        _ = trainer.evaluate()

        # Check predictions
        preds = trainer.predict(eval_dataset)
        for expected, seen in zip(eval_dataset.ys, preds.label_ids):
            self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
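            # Past each example's true length, the gathered arrays are expected to be padded
            # with -100, the padding index the Trainer uses when concatenating batches of
            # different shapes.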
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        for expected, seen in zip(eval_dataset.xs, preds.predictions):
            self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        # Same tests with eval accumulation
        args = TrainingArguments("./regression", eval_accumulation_steps=2, report_to="none")
        trainer = Trainer(model, args, eval_dataset=eval_dataset)

        # Check evaluation can run to completion
        _ = trainer.evaluate()

        # Check predictions
        preds = trainer.predict(eval_dataset)
        for expected, seen in zip(eval_dataset.ys, preds.label_ids):
            self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

        for expected, seen in zip(eval_dataset.xs, preds.predictions):
            self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]]))
            self.assertTrue(np.all(seen[expected.shape[0] :] == -100))

    def test_log_level(self):
        # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere)
        logger = logging.get_logger()
        log_info_string = "Running training"

        # test with the default log_level - should be the same as before and thus we test depending on is_info
        is_info = logging.get_verbosity() <= 20
        with CaptureLogger(logger) as cl:
            trainer = get_regression_trainer()
            trainer.train()
        if is_info:
            self.assertIn(log_info_string, cl.out)
        else:
            self.assertNotIn(log_info_string, cl.out)

        with LoggingLevel(logging.INFO):
            # test with low log_level - lower than info
            with CaptureLogger(logger) as cl:
                trainer = get_regression_trainer(log_level="debug")
                trainer.train()
            self.assertIn(log_info_string, cl.out)

        with LoggingLevel(logging.INFO):
            # test with high log_level - should be quiet
            with CaptureLogger(logger) as cl:
                trainer = get_regression_trainer(log_level="error")
                trainer.train()
            self.assertNotIn(log_info_string, cl.out)

    def test_save_checkpoints(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size))

        # With a regular model that is not a PreTrainedModel
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, pretrained=False)
            trainer.train()
            self.check_saved_checkpoints(tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False)

    @require_safetensors
    def test_safe_checkpoints(self):
        for save_safetensors in [True, False]:
            with tempfile.TemporaryDirectory() as tmpdir:
                trainer = get_regression_trainer(output_dir=tmpdir, save_steps=5, save_safetensors=save_safetensors)
                trainer.train()
                self.check_saved_checkpoints(
                    tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), safe_weights=save_safetensors
                )

            # With a regular model that is not a PreTrainedModel
            with tempfile.TemporaryDirectory() as tmpdir:
                trainer = get_regression_trainer(
                    output_dir=tmpdir, save_steps=5, pretrained=False, save_safetensors=save_safetensors
                )
                trainer.train()
                self.check_saved_checkpoints(
                    tmpdir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors
                )

    def test_load_best_model_with_save(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(
                output_dir=tmpdir,
                save_steps=5,
                evaluation_strategy="steps",
                eval_steps=5,
                max_steps=9,
            )
            trainer.train()
            # Check that we have the last known step:
checkpoint-{trainer.state.max_steps}" # And then check the last step assert os.path.exists(os.path.join(tmpdir, "checkpoint-9")), "Could not find checkpoint-9" # Now test that using a limit works # Should result in: # - save at step 5 (but is deleted) # - save at step 10 (loaded in at the end when `load_best_model=True`) # - save at step 11 with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, save_steps=5, evaluation_strategy="steps", eval_steps=5, load_best_model_at_end=True, save_total_limit=2, max_steps=11, ) trainer.train() # Check that we have the last known step: assert os.path.exists(os.path.join(tmpdir, "checkpoint-11")), "Could not find checkpoint-11" # And then check the last multiple assert os.path.exists(os.path.join(tmpdir, "checkpoint-10")), "Could not find checkpoint-10" # Finally check that we don't have an old one assert not os.path.exists(os.path.join(tmpdir, "checkpoint-5")), "Found checkpoint-5, limit not respected" # Finally check that the right model was loaded in, checkpoint-10 # this goes by the last `eval` step check to do so, so it won't be # the last model *saved* model_state = trainer.model.state_dict() final_model_weights = safetensors.torch.load_file( os.path.join(tmpdir, "checkpoint-10", "model.safetensors") ) for k, v in model_state.items(): assert torch.allclose(v, final_model_weights[k]), f"{k} is not the same" @require_torch_multi_accelerator def test_run_seq2seq_double_train_wrap_once(self): # test that we don't wrap the model more than once # since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for # example DataParallel(DataParallel(model)) trainer = get_regression_trainer() trainer.train() model_wrapped_before = trainer.model_wrapped trainer.train() model_wrapped_after = trainer.model_wrapped self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice") @require_torch_up_to_2_accelerators def test_can_resume_training(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). 
with tempfile.TemporaryDirectory() as tmpdir: kwargs = { "output_dir": tmpdir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "logging_steps": 5, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(tmpdir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # With a regular model that is not a PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: kwargs = { "output_dir": tmpdir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "pretrained": False, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(tmpdir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check failures # 1. fail to find a bogus checkpoint trainer = get_regression_trainer() with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus") self.assertTrue("Can't find a valid checkpoint at" in str(context.exception)) # 2. fail to find any checkpoint - due to a fresh output_dir output_dir2 = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=output_dir2) with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue("No valid checkpoint found in output directory" in str(context.exception)) @unittest.skip( reason="@muellerzr: Fix once Trainer can take an accelerate configuration. Need to set `seedable_sampler=True`." ) def test_resume_training_with_randomness(self): # For more than 1 GPU, since the randomness is introduced in the model and with DataParallel (which is used # in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes # GPU 0 will call first and sometimes GPU 1).
random_torch = not torch.cuda.is_available() or torch.cuda.device_count() <= 1 if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() with self.subTest("Test every step"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15")) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) with self.subTest("Test every epoch"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")] # There should be one checkpoint per epoch. self.assertEqual(len(checkpoints), 3) checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0] trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir)) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) @slow @require_accelerate @require_torch_non_multi_accelerator def test_auto_batch_size_finder(self): if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True SRC_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "examples", "pytorch", "text-classification") ) sys.path.append(SRC_DIR) import run_glue with tempfile.TemporaryDirectory() as tmpdir: testargs = f""" run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --task_name mrpc --do_train --do_eval --max_seq_len 128 --per_device_train_batch_size 4096 --learning_rate 2e-5 --num_train_epochs 1 --output_dir {tmpdir} --auto_find_batch_size 0 """.split() with self.assertRaises(RuntimeError): with patch.object(sys, "argv", testargs): run_glue.main() testargs[-1] = "1" with patch.object(sys, "argv", testargs): run_glue.main() @require_deepspeed def test_auto_batch_size_with_resume_from_checkpoint_with_deepspeed(self): train_dataset = RegressionDataset(length=128) config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() class MockCudaOOMCallback(TrainerCallback): def on_step_end(self, args, state, control, **kwargs): # simulate OOM on the first step if state.train_batch_size >= 16: raise RuntimeError("CUDA out of memory.") deepspeed = { "zero_optimization": { "stage": 1, }, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } args = RegressionTrainingArguments( tmp_dir, 
do_train=True, max_steps=2, save_steps=1, per_device_train_batch_size=16, auto_find_batch_size=True, deepspeed=deepspeed, ) # Note: This can have issues, for now we don't support this functionality # ref: https://github.com/huggingface/transformers/pull/29057 with self.assertRaises(NotImplementedError): _ = Trainer(model, args, train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()]) def test_auto_batch_size_with_resume_from_checkpoint(self): train_dataset = RegressionDataset(length=128) config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() class MockCudaOOMCallback(TrainerCallback): def on_step_end(self, args, state, control, **kwargs): # simulate OOM on the first step if state.train_batch_size >= 16: raise RuntimeError("CUDA out of memory.") args = RegressionTrainingArguments( tmp_dir, do_train=True, max_steps=2, save_steps=1, per_device_train_batch_size=16, auto_find_batch_size=True, ) trainer = Trainer(model, args, train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()]) trainer.train() # After `auto_find_batch_size` is run we should now be at 8 self.assertEqual(trainer._train_batch_size, 8) # We can then make a new Trainer trainer = Trainer(model, args, train_dataset=train_dataset) # Check we are at 16 to start self.assertEqual(trainer._train_batch_size, 16 * max(trainer.args.n_gpu, 1)) trainer.train(resume_from_checkpoint=True) # We should be back to 8 again, picking up from the last run Trainer self.assertEqual(trainer._train_batch_size, 8) # regression test for this issue: https://github.com/huggingface/transformers/issues/12970 def test_training_with_resume_from_checkpoint_false(self): train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=False) @require_torch_up_to_2_accelerators def test_resume_training_with_shard_checkpoint(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled).
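# For context: `convert_to_sharded_checkpoint` (a helper method defined elsewhere in this
# test file, see its definition for the exact behavior) rewrites the checkpoint's single
# weights file into several shard files plus an index, the same on-disk layout
# `save_pretrained` uses for large models, so resuming here has to exercise the
# sharded-loading path.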
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint) # Reinitialize trainer trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_safetensors @require_torch_up_to_2_accelerators def test_resume_training_with_safe_checkpoint(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). for initial_safe in [False, True]: for loaded_safe in [False, True]: with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=initial_safe, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint, load_safe=initial_safe, save_safe=loaded_safe) # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=loaded_safe ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_accelerators def test_resume_training_with_gradient_accumulation(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_accelerators def test_resume_training_with_frozen_params(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). 
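# Note: freezing `a` means only `b` receives gradient updates. The checkpoint stores
# weights, not `requires_grad` flags, so the flag is re-applied on the fresh model below
# before resuming, and the test asserts it survives the resume.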
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train(resume_from_checkpoint=checkpoint) self.assertFalse(trainer.model.a.requires_grad) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_load_best_model_at_end(self): total = int(self.n_epochs * 64 / self.batch_size) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss") with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total) self.check_best_model_has_been_loaded( tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True ) # Test this works with a non PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, pretrained=False, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False) @require_safetensors def test_load_best_model_from_safetensors(self): total = int(self.n_epochs * 64 / self.batch_size) for save_safetensors, pretrained in product([False, True], [False, True]): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, save_safetensors=save_safetensors, pretrained=pretrained, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained, safe_weights=save_safetensors) 
self.check_best_model_has_been_loaded( tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained, safe_weights=save_safetensors ) @slow def test_trainer_eval_mrpc(self): MODEL_ID = "google-bert/bert-base-cased-finetuned-mrpc" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID) data_args = GlueDataTrainingArguments( task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True ) eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") training_args = TrainingArguments(output_dir="./examples", use_cpu=True, report_to="none") trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset) result = trainer.evaluate() self.assertLess(result["eval_loss"], 0.2) @slow def test_trainer_eval_multiple(self): MODEL_ID = "openai-community/gpt2" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForCausalLM.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) for example in dataset.examples: example["labels"] = example["input_ids"] training_args = TrainingArguments( output_dir="./examples", use_cpu=True, per_device_eval_batch_size=1, report_to="none", ) trainer = Trainer( model=model, args=training_args, eval_dataset={ "data1": dataset, "data2": dataset, }, ) result = trainer.evaluate() self.assertIn("eval_data1_loss", result) self.assertIn("eval_data2_loss", result) @slow def test_trainer_eval_lm(self): MODEL_ID = "distilbert/distilroberta-base" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) self.assertEqual(len(dataset), 31) def test_training_iterable_dataset(self): config = RegressionModelConfig() model = RegressionPreTrainedModel(config) # Adding one column not used by the model should have no impact train_dataset = SampleIterableDataset(label_names=["labels", "extra"]) args = RegressionTrainingArguments(output_dir="./examples", max_steps=4) trainer = Trainer(model=model, args=args, train_dataset=train_dataset) trainer.train() self.assertEqual(trainer.state.global_step, 4) loader = trainer.get_train_dataloader() self.assertIsInstance(loader, torch.utils.data.DataLoader) self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler) def test_evaluation_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) # Adding one column not used by the model should have no impact eval_dataset = SampleIterableDataset(label_names=["labels", "extra"]) args = RegressionTrainingArguments(output_dir="./examples") trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size eval_dataset = SampleIterableDataset(length=66) results = trainer.evaluate(eval_dataset) x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() 
self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() args = RegressionTrainingArguments(output_dir="./examples") trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) preds = trainer.predict(trainer.eval_dataset).predictions x = eval_dataset.dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size # Adding one column not used by the model should have no impact test_dataset = SampleIterableDataset(length=66, label_names=["labels", "extra"]) preds = trainer.predict(test_dataset).predictions x = test_dataset.dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) def test_num_train_epochs_in_training(self): # len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given. # It should give 1 update step for each epoch. trainer = get_regression_trainer( max_steps=3, train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5 ) train_output = trainer.train() self.assertEqual(train_output.global_step, 3) # Even ``max_steps`` is not specified, we still expect 1 update step for each epoch if # len(train_dl) < gradient_accumulation_steps. trainer = get_regression_trainer(train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5) train_output = trainer.train() self.assertEqual(train_output.global_step, int(self.n_epochs)) def test_early_stopping_callback(self): # early stopping stops training before num_training_epochs with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, load_best_model_at_end=True, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1, 0.0001)) train_output = trainer.train() self.assertLess(train_output.global_step, 20 * 64 / 16) # Invalid inputs to trainer with early stopping callback result in assertion error with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, eval_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1)) self.assertEqual(trainer.state.global_step, 0) try: trainer.train() except AssertionError: self.assertEqual(trainer.state.global_step, 0) def test_flos_extraction(self): trainer = get_regression_trainer(learning_rate=0.1) def assert_flos_extraction(trainer, wrapped_model_to_check): self.assertEqual(trainer.model, trainer.accelerator.unwrap_model(wrapped_model_to_check)) self.assertGreaterEqual( getattr(trainer.accelerator.unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0 ) # with plain model assert_flos_extraction(trainer, trainer.model) # with enforced DataParallel assert_flos_extraction(trainer, nn.DataParallel(trainer.model)) trainer.train() self.assertTrue(isinstance(trainer.state.total_flos, float)) def check_checkpoint_deletion(self, trainer, output_dir, expected): # 
Make fake checkpoints for n in [5, 10, 15, 20, 25]: os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True) trainer._rotate_checkpoints(output_dir=output_dir) glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")] values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints] self.assertSetEqual(set(values), set(expected)) def test_checkpoint_rotation(self): with tempfile.TemporaryDirectory() as tmp_dir: # Without best model at end trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2) self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25]) # With best model at end trainer = get_regression_trainer( output_dir=tmp_dir, eval_strategy="steps", load_best_model_at_end=True, save_total_limit=2 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) # Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume # from checkpoint trainer = get_regression_trainer( output_dir=tmp_dir, eval_strategy="steps", load_best_model_at_end=True, save_total_limit=1 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25") self.check_checkpoint_deletion(trainer, tmp_dir, [25]) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) def test_compare_trainer_and_checkpoint_args_logging(self): logger = logging.get_logger() with tempfile.TemporaryDirectory() as tmpdir, CaptureLogger(logger) as cl: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, eval_steps=5, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train() checkpoint = os.path.join(tmpdir, "checkpoint-5") checkpoint_trainer = get_regression_trainer( output_dir=tmpdir, train_len=256, eval_steps=10, gradient_accumulation_steps=4, per_device_train_batch_size=8, save_steps=10, learning_rate=0.1, ) checkpoint_trainer.train(resume_from_checkpoint=checkpoint) self.assertIn("save_steps: 10 (from args) != 5 (from trainer_state.json)", cl.out) self.assertIn( "per_device_train_batch_size: 8 (from args) != 4 (from trainer_state.json)", cl.out, ) self.assertIn( "eval_steps: 10 (from args) != 5 (from trainer_state.json)", cl.out, ) def check_mem_metrics(self, trainer, check_func): metrics = trainer.train().metrics check_func("init_mem_cpu_alloc_delta", metrics) check_func("train_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("init_mem_gpu_alloc_delta", metrics) check_func("train_mem_gpu_alloc_delta", metrics) metrics = trainer.evaluate() check_func("eval_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("eval_mem_gpu_alloc_delta", metrics) metrics = trainer.predict(RegressionDataset()).metrics check_func("test_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("test_mem_gpu_alloc_delta", metrics) def test_mem_metrics(self): # with mem metrics enabled trainer = get_regression_trainer(skip_memory_metrics=False) self.check_mem_metrics(trainer, self.assertIn) # with mem metrics disabled trainer = get_regression_trainer(skip_memory_metrics=True) self.check_mem_metrics(trainer, self.assertNotIn) @require_torch_accelerator def test_fp16_full_eval(self): # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. 
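# Back-of-the-envelope for the thresholds used below: a and b are each 1000 x 8 fp32
# tensors -> 1000 * 8 * 4 bytes = 32_000 bytes apiece, i.e. ~64KB together in fp32 and
# ~32KB once cast to half precision, hence the 59_000 / 27_000 bounds (64K / 32K minus
# a few KB of slack).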
# it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. with fp16_full_eval disabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram. # perfect world: fp32_init == 64<<10 self.assertGreater(fp32_init, 59_000) # after eval should be no extra memory allocated - with a small margin (other than the peak # memory consumption for the forward calculation that gets recovered) # perfect world: fp32_eval == close to zero self.assertLess(fp32_eval, 5_000) # 2. with fp16_full_eval enabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() fp16_init = metrics["init_mem_gpu_alloc_delta"] fp16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp16_init {fp16_init}") print(f"fp16_eval {fp16_eval}") # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0 # perfect world: fp16_init == close to zero self.assertLess(fp16_init, 5_000) # here we put the model on device in eval and only `half()` of it, i.e. about 32K (again we ignore the peak margin which gets returned back) # perfect world: fp16_eval == 32<<10 self.assertGreater(fp16_eval, 27_000) # 3. relative comparison fp32 vs full fp16 # fp16_eval should be about half of fp32_init # perfect world: fp32_init/2 == fp16_eval self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000) @require_torch_non_multi_gpu @require_torchdynamo @require_torch_tensorrt_fx def test_torchdynamo_full_eval(self): import torchdynamo # torchdynamo at the moment doesn't support DP/DDP, therefore requires a single gpu n_gpus = get_gpu_count() bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. Default - without TorchDynamo trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len) metrics = trainer.evaluate() original_eval_loss = metrics["eval_loss"] del trainer # 2. TorchDynamo eager trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="eager") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) del trainer torchdynamo.reset() # 3. TorchDynamo nvfuser trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="nvfuser") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() # 4.
TorchDynamo fx2trt trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, torchdynamo="fx2trt") metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() @unittest.skip(reason="torch 2.0.0 gives `ModuleNotFoundError: No module named 'torchdynamo'`.") @require_torch_non_multi_gpu @require_torchdynamo def test_torchdynamo_memory(self): # torchdynamo at the moment doesn't support DP/DDP, therefore requires a single gpu import torchdynamo class CustomTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): x = inputs["x"] output = model(x) if self.args.n_gpu == 1: return output.mean() return output class MyModule(torch.nn.Module): """Simple module that does aggressive fusion""" def __init__(self): super().__init__() def forward(self, x): for _ in range(20): x = torch.cos(x) return x mod = MyModule() # 1. without TorchDynamo (eager baseline) a = torch.ones(1024, 1024, device="cuda", requires_grad=True) a.grad = None trainer = CustomTrainer(model=mod) # warmup for _ in range(10): orig_loss = trainer.training_step(mod, {"x": a}) # resets gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() orig_loss = trainer.training_step(mod, {"x": a}) orig_peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer # 2. TorchDynamo nvfuser a = torch.ones(1024, 1024, device="cuda", requires_grad=True) a.grad = None args = TrainingArguments(output_dir="None", torchdynamo="nvfuser") trainer = CustomTrainer(model=mod, args=args) # warmup for _ in range(10): loss = trainer.training_step(mod, {"x": a}) # resets gc.collect() torch.cuda.empty_cache() torch.cuda.reset_peak_memory_stats() loss = trainer.training_step(mod, {"x": a}) peak_mem = torch.cuda.max_memory_allocated() torchdynamo.reset() del trainer # Functional check self.assertAlmostEqual(loss, orig_loss) # AOT Autograd recomputation and nvfuser recomputation optimizations # aggressively fuse the operations and reduce the memory footprint. self.assertGreater(orig_peak_mem, peak_mem * 2) @require_torch_accelerator @require_torch_bf16 def test_bf16_full_eval(self): # note: most of the logic is the same as test_fp16_full_eval # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. # it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. with bf16_full_eval disabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, skip_memory_metrics=False) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram. # perfect world: fp32_init == 64<<10 self.assertGreater(fp32_init, 59_000) # after eval should be no extra memory allocated - with a small margin (other than the peak # memory consumption for the forward calculation that gets recovered) # perfect world: fp32_eval == close to zero self.assertLess(fp32_eval, 5_000) # 2.
with bf16_full_eval enabled trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False) metrics = trainer.evaluate() bf16_init = metrics["init_mem_gpu_alloc_delta"] bf16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"bf16_init {bf16_init}") print(f"bf16_eval {bf16_eval}") # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0 # perfect world: bf16_init == close to zero self.assertLess(bf16_init, 5_000) # here we put the model on device in eval and only `bfloat16()` of it, i.e. about 32K (again we ignore the peak margin which gets returned back) # perfect world: bf16_eval == 32<<10 self.assertGreater(bf16_eval, 27_000) # 3. relative comparison fp32 vs full bf16 # bf16_eval should be about half of fp32_init # perfect world: fp32_init/2 == bf16_eval self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000) def test_no_wd_param_group(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) trainer.create_optimizer_and_scheduler(10) wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] # fmt: skip wd_params = [p for n, p in model.named_parameters() if n in wd_names] no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names] self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params) self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params) @slow @require_torch_multi_accelerator def test_end_to_end_example(self): # Tests that `run_translation.py` will run without issues script_path = os.path.abspath( os.path.join( os.path.dirname(__file__), "..", "..", "examples", "pytorch", "translation", "run_translation.py" ) ) with tempfile.TemporaryDirectory() as tmpdir: command = [ "accelerate", "launch", script_path, "--model_name_or_path", "google-t5/t5-small", "--per_device_train_batch_size", "1", "--output_dir", tmpdir, "--overwrite_output_dir", "--do_train", "--max_train_samples", "64", "--num_train_epochs", "1", "--dataset_name", "wmt16", "--dataset_config", "ro-en", "--source_lang", "en", "--target_lang", "ro", "--do_predict", "--max_predict_samples", "64", "--predict_with_generate", "--ddp_timeout", "60", "--report_to", "none", ] execute_subprocess_async(command) # successful return here == success - any errors would have caused an error or a timeout in the sub-call def test_accelerator_config_empty(self): # Checks that a config can be made with the defaults if not passed with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves one option as something *not* basic args = RegressionTrainingArguments( output_dir=tmp_dir, ) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, False) self.assertEqual(trainer.accelerator.dispatch_batches, None) self.assertEqual(trainer.accelerator.even_batches, True) self.assertEqual(trainer.accelerator.use_seedable_sampler, True) if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: # gradient accumulation kwargs configures gradient_state self.assertNotIn("sync_each_batch", trainer.accelerator.gradient_state.plugin_kwargs) def test_accelerator_config_from_dict(self): # Checks
that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() accelerator_config = { "split_batches": True, "dispatch_batches": True, "even_batches": False, "use_seedable_sampler": True, } if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: accelerator_config["gradient_accumulation_kwargs"] = {"sync_each_batch": True} # Leaves all options as something *not* basic args = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config=accelerator_config, ) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.use_seedable_sampler, True) if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["sync_each_batch"], True) def test_accelerator_config_from_yaml(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: path_file = Path(tmp_dir) / "accelerator_config.json" with open(path_file, "w") as f: accelerator_config = { "split_batches": True, "dispatch_batches": True, "even_batches": False, "use_seedable_sampler": False, } if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: accelerator_config["gradient_accumulation_kwargs"] = {"sync_each_batch": True} json.dump(accelerator_config, f) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves all options as something *not* basic args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=path_file) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.use_seedable_sampler, False) if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["sync_each_batch"], True) def test_accelerator_config_from_dataclass(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively accelerator_config = AcceleratorConfig( split_batches=True, dispatch_batches=True, even_batches=False, use_seedable_sampler=False, ) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.use_seedable_sampler, False) @require_accelerate_version_min_0_28 def test_accelerate_config_from_dataclass_grad_accum(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively grad_acc_kwargs = { "num_steps": 10, "adjust_scheduler": False, "sync_with_dataloader": False, "sync_each_batch": True, 
} accelerator_config = AcceleratorConfig( split_batches=True, dispatch_batches=True, even_batches=False, use_seedable_sampler=False, gradient_accumulation_kwargs=grad_acc_kwargs, ) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["num_steps"], 10) self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["adjust_scheduler"], False) self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["sync_with_dataloader"], False) self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["sync_each_batch"], True) def test_accelerator_config_from_partial(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves one option as something *not* basic args = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config={ "split_batches": True, }, ) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, None) self.assertEqual(trainer.accelerator.even_batches, True) self.assertEqual(trainer.accelerator.use_seedable_sampler, True) def test_accelerator_config_from_dict_with_deprecated_args(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively # and maintains the deprecated args if passed in with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves all options as something *not* basic with self.assertWarns(FutureWarning) as cm: args = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config={ "split_batches": True, }, dispatch_batches=False, ) self.assertIn("dispatch_batches", str(cm.warnings[0].message)) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.dispatch_batches, False) self.assertEqual(trainer.accelerator.split_batches, True) with self.assertWarns(FutureWarning) as cm: args = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config={ "even_batches": False, }, split_batches=True, ) self.assertIn("split_batches", str(cm.warnings[0].message)) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.dispatch_batches, None) def test_accelerator_config_only_deprecated_args(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertWarns(FutureWarning) as cm: args = RegressionTrainingArguments( output_dir=tmp_dir, split_batches=True, ) self.assertIn("split_batches", str(cm.warnings[0].message)) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) def 
test_accelerator_custom_state(self): AcceleratorState._reset_state(reset_partial_state=True) with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as cm: _ = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config={"use_configured_state": True}) self.assertIn("Please define this beforehand", str(cm.exception)) _ = Accelerator() _ = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config={"use_configured_state": True}) AcceleratorState._reset_state(reset_partial_state=True) @require_accelerate_version_min_0_28 def test_accelerator_config_from_dict_grad_accum_num_steps(self): with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # case - TrainingArguments.gradient_accumulation_steps == 1 # - gradient_accumulation_kwargs['num_steps'] == 1 # results in grad accum set to 1 args = RegressionTrainingArguments( output_dir=tmp_dir, gradient_accumulation_steps=1, accelerator_config={ "gradient_accumulation_kwargs": { "num_steps": 1, } }, ) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["num_steps"], 1) # case - TrainingArguments.gradient_accumulation_steps > 1 # - gradient_accumulation_kwargs['num_steps'] specified # results in exception raised args = RegressionTrainingArguments( output_dir=tmp_dir, gradient_accumulation_steps=2, accelerator_config={ "gradient_accumulation_kwargs": { "num_steps": 10, } }, ) with self.assertRaises(Exception) as context: trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertTrue("The `AcceleratorConfig`'s `num_steps` is set but" in str(context.exception)) def test_accelerator_config_not_instantiated(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(NotImplementedError) as context: _ = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config=AcceleratorConfig, ) self.assertTrue("Tried passing in a callable to `accelerator_config`" in str(context.exception)) # Now test with a custom subclass @dataclasses.dataclass class CustomAcceleratorConfig(AcceleratorConfig): pass @dataclasses.dataclass class CustomTrainingArguments(TrainingArguments): accelerator_config: dict = dataclasses.field( default=CustomAcceleratorConfig, ) with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(NotImplementedError) as context: _ = CustomTrainingArguments( output_dir=tmp_dir, ) self.assertTrue("Tried passing in a callable to `accelerator_config`" in str(context.exception)) def test_torch_dtype_to_json(self): @dataclasses.dataclass class TorchDtypeTrainingArguments(TrainingArguments): torch_dtype: torch.dtype = dataclasses.field( default=torch.float32, ) for dtype in [ "float32", "float64", "complex64", "complex128", "float16", "bfloat16", "uint8", "int8", "int16", "int32", "int64", "bool", ]: torch_dtype = getattr(torch, dtype) with tempfile.TemporaryDirectory() as tmp_dir: args = TorchDtypeTrainingArguments(output_dir=tmp_dir, torch_dtype=torch_dtype) args_dict = args.to_dict() self.assertIn("torch_dtype", args_dict) self.assertEqual(args_dict["torch_dtype"], dtype) @require_accelerate_version_min_0_30 def test_eval_use_gather_object(self): train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() args =
TrainingArguments("./regression", report_to="none", eval_use_gather_object=True) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) @require_torch @is_staging_test class TrainerIntegrationWithHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): for model in [ "test-trainer", "test-trainer-epoch", "test-trainer-step", "test-trainer-tensorboard", "test-trainer-tags", ]: try: delete_repo(token=cls._token, repo_id=model) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-trainer-org") except HTTPError: pass def test_push_to_hub(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer"), push_to_hub=True, hub_token=self._token, ) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"{USER}/test-trainer") model = RegressionPreTrainedModel.from_pretrained(repo_name) self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def test_push_to_hub_in_organization(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir) trainer.save_model() trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-org"), push_to_hub=True, hub_model_id="valid_org/test-trainer-org", hub_token=self._token, ) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, "valid_org/test-trainer-org") model = RegressionPreTrainedModel.from_pretrained("valid_org/test-trainer-org") self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def get_commit_history(self, repo): commit_logs = subprocess.run( "git log".split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE, check=True, encoding="utf-8", cwd=repo, ).stdout commits = commit_logs.split("\n\n")[1::2] return [commit.strip() for commit in commits] def test_push_to_hub_with_saves_each_epoch(self): with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs(level="WARNING") as logs: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-epoch"), push_to_hub=True, hub_token=self._token, # To avoid any flakiness if the training goes faster than the uploads. hub_always_push=True, save_strategy="epoch", ) trainer.train() commits = list_repo_commits(f"{USER}/test-trainer-epoch", token=self._token) commits = [c.title for c in commits] self.assertIn("initial commit", commits) self.assertIn("Training in progress, epoch 1", commits) self.assertIn("Training in progress, epoch 2", commits) # Epochs 3 and 4 are not guaranteed to be present (empty commits) self.assertTrue(any("Skipping to prevent empty commit." 
in record.message for record in logs.records)) def test_push_to_hub_with_saves_each_n_steps(self): num_gpus = max(1, backend_device_count(torch_device)) if num_gpus > 2: self.skipTest(reason="More than 2 GPUs available") with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs(level="WARNING") as logs: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-step"), push_to_hub=True, hub_token=self._token, # To avoid any flakiness if the training goes faster than the uploads. hub_always_push=True, save_strategy="steps", save_steps=5, ) trainer.train() commits = list_repo_commits(f"{USER}/test-trainer-step", token=self._token) commits = [c.title for c in commits] self.assertIn("initial commit", commits) # Some commits are skipped if nothing has changed # We expect 1 commit per 5 steps + 1 commit at the end nb_empty_commits = len( [record for record in logs.records if "Skipping to prevent empty commit." in record.message] ) nb_step_commits = len([commit for commit in commits if "Training in progress, step" in commit]) # max_steps depends on the number of available GPUs max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader())) nb_expected_commits = len(range(5, max_steps, 5)) # '>=' since final commit might be an empty commit as well (not deterministic) self.assertGreaterEqual(nb_empty_commits + nb_step_commits, nb_expected_commits) @require_tensorboard def test_push_to_hub_with_tensorboard_logs(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-tensorboard"), hub_token=self._token, save_strategy="epoch", report_to=["tensorboard"], keep_report_to=True, ) trainer.train() # Push the runs via `push_to_hub()` trainer.push_to_hub() files = list_repo_files(f"{USER}/test-trainer-tensorboard", token=self._token) found_log = False for f in files: if len(f.split("runs")) > 1 and "events.out.tfevents" in f: found_log = True assert found_log is True, "No tensorboard log found in repo" def test_push_to_hub_tags(self): # Checks if `trainer.push_to_hub()` works correctly by adding the desired # tag without having to pass `tags` in `push_to_hub` # see: with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, "test-trainer-tags"), push_to_hub=True, hub_token=self._token, ) trainer.model.add_model_tags(["test-trainer-tags"]) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"{USER}/test-trainer-tags") model_card = ModelCard.load(repo_name) self.assertTrue("test-trainer-tags" in model_card.data.tags) @require_torch @require_optuna class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) with tempfile.TemporaryDirectory() as tmp_dir: trainer =
get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4) @require_torch @require_optuna class TrainerHyperParameterMultiObjectOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) def compute_objective(metrics: Dict[str, float]) -> List[float]: return metrics["eval_loss"], metrics["eval_accuracy"] with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=10, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, compute_metrics=AlmostAccuracy(), ) trainer.hyperparameter_search( direction=["minimize", "maximize"], hp_space=hp_space, hp_name=hp_name, n_trials=4, compute_objective=compute_objective, ) @require_torch @require_ray class TrainerHyperParameterRayIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def ray_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): from ray import tune return { "a": tune.randint(-4, 4), "b": tune.randint(-4, 4), } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4 ) def test_hyperparameter_search(self): self.ray_hyperparameter_search() def test_hyperparameter_search_ray_client(self): import ray from ray.util.client.ray_client_helpers import ray_start_client_server with ray_start_client_server(): assert ray.util.client.ray.is_connected() self.ray_hyperparameter_search() @slow @require_torch @require_sigopt class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): 
return [ {"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"}, {"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"}, ] def model_init(trial): if trial is not None: a = trial.assignments["a"] b = trial.assignments["b"] else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.assignments) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4 ) optim_test_params = [] if is_torch_available(): default_adam_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "eps": TrainingArguments.adam_epsilon, "lr": TrainingArguments.learning_rate, } default_lion_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "lr": TrainingArguments.learning_rate, } default_anyprecision_kwargs = { "use_kahan_summation": False, "momentum_dtype": torch.float32, "variance_dtype": torch.float32, "compensation_buffer_dtype": torch.bfloat16, } optim_test_params = [ ( TrainingArguments(optim=OptimizerNames.ADAMW_HF, output_dir="None"), transformers.optimization.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAMW_HF.value, output_dir="None"), transformers.optimization.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAMW_TORCH, output_dir="None"), torch.optim.AdamW, default_adam_kwargs, ), ( TrainingArguments(optim=OptimizerNames.ADAFACTOR, output_dir="None"), transformers.optimization.Adafactor, { "scale_parameter": False, "relative_step": False, "lr": TrainingArguments.learning_rate, }, ), ] if is_apex_available(): import apex optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"), apex.optimizers.FusedAdam, default_adam_kwargs, ) ) if is_bitsandbytes_available(): import bitsandbytes as bnb optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"), bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.LION, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"), bnb.optim.Lion, default_lion_kwargs, ) ) if is_torchdistx_available(): import torchdistx optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"), torchdistx.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) ) if 
is_torchao_available(): import torchao optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_TORCH_4BIT, output_dir="None"), torchao.prototype.low_bit_optim.AdamW4bit, default_adam_kwargs, ) ) @require_torch class TrainerOptimizerChoiceTest(unittest.TestCase): def check_optim_and_kwargs(self, training_args: TrainingArguments, expected_cls, expected_kwargs): actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args) self.assertEqual(expected_cls, actual_cls) self.assertIsNotNone(optim_kwargs) for p, v in expected_kwargs.items(): self.assertTrue(p in optim_kwargs) actual_v = optim_kwargs[p] self.assertTrue(actual_v == v, f"Failed check for {p}. Expected {v}, but got {actual_v}.") @parameterized.expand(optim_test_params, skip_on_empty=True) def test_optim_supported(self, training_args: TrainingArguments, expected_cls, expected_kwargs): # exercises all the valid --optim options self.check_optim_and_kwargs(training_args, expected_cls, expected_kwargs) trainer = get_regression_trainer(**training_args.to_dict()) trainer.train() def test_fused_adam(self): # Pretend that apex is installed and mock apex.optimizers.FusedAdam exists. # Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam. It only has to return the # class given, so mocking apex.optimizers.FusedAdam should be fine for testing and allow # the test to run without requiring an apex installation. mock = Mock() modules = { "apex": mock, "apex.optimizers": mock.optimizers, "apex.optimizers.FusedAdam": mock.optimizers.FusedAdam, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None"), mock.optimizers.FusedAdam, default_adam_kwargs, ) def test_fused_adam_no_apex(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir="None") # Pretend that apex does not exist, even if installed. By setting apex to None, importing # apex will fail even if apex is installed. with patch.dict("sys.modules", {"apex.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_adam8bit(self): # Pretend that Bits and Bytes is installed and mock bnb.optim.Adam8bit exists. # Trainer.get_optimizer_cls_and_kwargs does not use Adam8bit. It only has to return the # class given, so mocking bnb.optim.Adam8bit should be fine for testing and allow # the test to run without requiring a bnb installation. 
mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit_alias(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None"), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None"), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_adam8bit_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam_no_bnb(self): args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir="None") # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if bnb is installed. 
        with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
            with self.assertRaises(ValueError):
                Trainer.get_optimizer_cls_and_kwargs(args)

    def test_bnb_paged_adam8bit_no_bnb(self):
        args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir="None")

        # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
        # bnb will fail even if bnb is installed.
        with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
            with self.assertRaises(ValueError):
                Trainer.get_optimizer_cls_and_kwargs(args)

    def test_bnb_paged_lion_no_bnb(self):
        args = TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir="None")

        # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
        # bnb will fail even if bnb is installed.
        with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
            with self.assertRaises(ValueError):
                Trainer.get_optimizer_cls_and_kwargs(args)

    def test_bnb_paged_lion8bit_no_bnb(self):
        args = TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir="None")

        # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing
        # bnb will fail even if bnb is installed.
        with patch.dict("sys.modules", {"bitsandbytes.optim": None}):
            with self.assertRaises(ValueError):
                Trainer.get_optimizer_cls_and_kwargs(args)

    def test_anyprecision_adamw(self):
        # Pretend that torchdistx is installed and mock torchdistx.optimizers.AnyPrecisionAdamW exists.
        # Trainer.get_optimizer_cls_and_kwargs does not use AnyPrecisionAdamW. It only has to return the
        # class given, so mocking torchdistx.optimizers.AnyPrecisionAdamW should be fine for testing and allow
        # the test to run without requiring a torchdistx installation.
        mock = Mock()
        modules = {
            "torchdistx": mock,
            "torchdistx.optimizers": mock.optimizers,
            "torchdistx.optimizers.AnyPrecisionAdamW": mock.optimizers.AnyPrecisionAdamW,
        }
        with patch.dict("sys.modules", modules):
            self.check_optim_and_kwargs(
                TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None"),
                mock.optimizers.AnyPrecisionAdamW,
                dict(default_adam_kwargs, **default_anyprecision_kwargs),
            )

    def test_no_torchdistx_anyprecision_adamw(self):
        args = TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir="None")

        # Pretend that torchdistx does not exist, even if installed. By setting torchdistx to None, importing
        # torchdistx.optimizers will fail even if torchdistx is installed.
with patch.dict("sys.modules", {"torchdistx.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) @require_torch @require_wandb class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return { "method": "random", "metric": {}, "parameters": { "a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "b": {"distribution": "int_uniform", "min": 1, "max": 6}, }, } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="wandb", n_trials=4, anonymous="must" ) class HyperParameterSearchBackendsTest(unittest.TestCase): def test_hyperparameter_search_backends(self): self.assertEqual( list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()), list(HPSearchBackend), ) @require_torch class OptimizerAndModelInspectionTest(unittest.TestCase): def test_get_num_trainable_parameters(self): model = nn.Sequential(nn.Linear(128, 64), nn.Linear(64, 32)) # in_features * out_features + bias layer_1 = 128 * 64 + 64 layer_2 = 64 * 32 + 32 with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) self.assertEqual(trainer.get_num_trainable_parameters(), layer_1 + layer_2) # Freeze the last layer for param in model[-1].parameters(): param.requires_grad = False self.assertEqual(trainer.get_num_trainable_parameters(), layer_1) def test_get_learning_rates(self): model = nn.Sequential(nn.Linear(128, 64)) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) with self.assertRaises(ValueError): trainer.get_learning_rates() trainer.create_optimizer() self.assertEqual(trainer.get_learning_rates(), [5e-05, 5e-05]) def test_get_optimizer_group(self): model = nn.Sequential(nn.Linear(128, 64)) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) # ValueError is raised if optimizer is None with self.assertRaises(ValueError): trainer.get_optimizer_group() trainer.create_optimizer() # Get groups num_groups = len(trainer.get_optimizer_group()) self.assertEqual(num_groups, 2) # Get group of parameter param = next(model.parameters()) group = trainer.get_optimizer_group(param) self.assertIn(param, group["params"])
transformers/tests/trainer/test_trainer.py/0
{ "file_path": "transformers/tests/trainer/test_trainer.py", "repo_id": "transformers", "token_count": 90644 }
410
import unittest import warnings from dataclasses import dataclass from transformers.convert_slow_tokenizer import SpmConverter from transformers.testing_utils import get_tests_dir @dataclass class FakeOriginalTokenizer: vocab_file: str class ConvertSlowTokenizerTest(unittest.TestCase): def test_spm_converter_bytefallback_warning(self): spm_model_file_without_bytefallback = get_tests_dir("fixtures/test_sentencepiece.model") spm_model_file_with_bytefallback = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model") original_tokenizer_without_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_without_bytefallback) with warnings.catch_warnings(record=True) as w: _ = SpmConverter(original_tokenizer_without_bytefallback) self.assertEqual(len(w), 0) original_tokenizer_with_bytefallback = FakeOriginalTokenizer(vocab_file=spm_model_file_with_bytefallback) with warnings.catch_warnings(record=True) as w: _ = SpmConverter(original_tokenizer_with_bytefallback) self.assertEqual(len(w), 1) self.assertIn( "The sentencepiece tokenizer that you are converting to a fast tokenizer uses the byte fallback option" " which is not implemented in the fast tokenizers.", str(w[0].message), )
transformers/tests/utils/test_convert_slow_tokenizer.py/0
{ "file_path": "transformers/tests/utils/test_convert_slow_tokenizer.py", "repo_id": "transformers", "token_count": 524 }
411
# coding=utf-8 # Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import copy import os import tempfile from importlib import import_module from math import isnan from transformers import is_tf_available from transformers.models.auto import get_values from transformers.testing_utils import _tf_gpu_memory_limit, require_tf, slow from ..test_modeling_tf_common import ids_tensor if is_tf_available(): import numpy as np import tensorflow as tf from transformers import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, ) from transformers.modeling_tf_utils import keras if _tf_gpu_memory_limit is not None: gpus = tf.config.list_physical_devices("GPU") for gpu in gpus: # Restrict TensorFlow to only allocate x GB of memory on the GPUs try: tf.config.set_logical_device_configuration( gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)] ) logical_gpus = tf.config.list_logical_devices("GPU") print("Logical GPUs", logical_gpus) except RuntimeError as e: # Virtual devices must be set before GPUs have been initialized print(e) @require_tf class TFCoreModelTesterMixin: model_tester = None all_model_classes = () all_generative_model_classes = () test_mismatched_shapes = True test_resize_embeddings = True test_head_masking = True is_encoder_decoder = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict: inputs_dict = copy.deepcopy(inputs_dict) if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict = { k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1)) if isinstance(v, tf.Tensor) and v.ndim > 0 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING): inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING): inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING), ]: inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) elif model_class in [ *get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), 
*get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING), ]: inputs_dict["labels"] = tf.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32 ) return inputs_dict @slow def test_graph_mode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: inputs = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_xla_mode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: inputs = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @tf.function(experimental_compile=True) def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) @slow def test_xla_fit(self): # This is a copy of the test_keras_fit method, but we use XLA compilation instead of eager config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) if getattr(model, "hf_compute_loss", None): # Test that model correctly compute the loss with kwargs prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True) # Is there a better way to remove these decoder inputs? prepared_for_class = { key: val for key, val in prepared_for_class.items() if key not in ("head_mask", "decoder_head_mask", "cross_attn_head_mask", "decoder_input_ids") } possible_label_cols = { "labels", "label", "label_ids", "start_positions", "start_position", "end_positions", "end_position", "next_sentence_label", } label_names = possible_label_cols.intersection(set(prepared_for_class)) self.assertGreater(len(label_names), 0, msg="No matching label names found!") labels = {key: val for key, val in prepared_for_class.items() if key in label_names} inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names} self.assertGreater(len(inputs_minus_labels), 0) # Make sure it works with XLA! model.compile(optimizer=keras.optimizers.SGD(0.0), jit_compile=True) # Make sure the model fits without crashing regardless of where we pass the labels history = model.fit( prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0, ) loss = history.history["loss"][0] self.assertTrue(not isnan(loss)) val_loss = history.history["val_loss"][0] self.assertTrue(not isnan(val_loss)) # Now test it with separate labels, to make sure that path works in XLA too. 
model = model_class(config) model.compile(optimizer=keras.optimizers.SGD(0.0), jit_compile=True) history = model.fit( inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0, ) loss = history.history["loss"][0] self.assertTrue(not isnan(loss)) val_loss = history.history["val_loss"][0] self.assertTrue(not isnan(val_loss)) @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes[:2]: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) model.build_in_name_scope() num_out = len(model(class_inputs_dict)) for key in list(class_inputs_dict.keys()): # Remove keys not in the serving signature, as the SavedModel will not be compiled to deal with them if key not in model.input_signature: del class_inputs_dict[key] # Check it's a tensor, in case the inputs dict has some bools in it too elif isinstance(class_inputs_dict[key], tf.Tensor) and class_inputs_dict[key].dtype.is_integer: class_inputs_dict[key] = tf.cast(class_inputs_dict[key], tf.int32) if set(class_inputs_dict.keys()) != set(model.input_signature.keys()): continue # Some models have inputs that the preparation functions don't create, we skip those with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) if self.is_encoder_decoder: output_hidden_states = outputs["encoder_hidden_states"] output_attentions = outputs["encoder_attentions"] else: output_hidden_states = outputs["hidden_states"] output_attentions = outputs["attentions"] self.assertEqual(len(outputs), num_out) expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(output_hidden_states), expected_num_layers) self.assertListEqual( list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size], ) self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) @slow def test_mixed_precision(self): keras.mixed_precision.set_global_policy("mixed_float16") # try/finally block to ensure subsequent tests run in float32 try: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) outputs = model(class_inputs_dict) self.assertIsNotNone(outputs) finally: keras.mixed_precision.set_global_policy("float32") @slow def test_train_pipeline_custom_model(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # head_mask and decoder_head_mask has different shapes than other input args if "head_mask" in inputs_dict: del inputs_dict["head_mask"] if "decoder_head_mask" in inputs_dict: del 
inputs_dict["decoder_head_mask"] if "cross_attn_head_mask" in inputs_dict: del inputs_dict["cross_attn_head_mask"] tf_main_layer_classes = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(module) if module_member_name.endswith("MainLayer") for module_member in (getattr(module, module_member_name),) if isinstance(module_member, type) and keras.layers.Layer in module_member.__bases__ and getattr(module_member, "_keras_serializable", False) } for main_layer_class in tf_main_layer_classes: # T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter if "T5" in main_layer_class.__name__: # Take the same values than in TFT5ModelTester for this shared layer shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared") config.use_cache = False main_layer = main_layer_class(config, embed_tokens=shared) else: main_layer = main_layer_class(config) symbolic_inputs = { name: keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } if hasattr(self.model_tester, "num_labels"): num_labels = self.model_tester.num_labels else: num_labels = 2 X = tf.data.Dataset.from_tensor_slices( (inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1))) ).batch(1) hidden_states = main_layer(symbolic_inputs)[0] outputs = keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states) model = keras.models.Model(inputs=symbolic_inputs, outputs=[outputs]) model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"]) model.fit(X, epochs=1) with tempfile.TemporaryDirectory() as tmpdirname: filepath = os.path.join(tmpdirname, "keras_model.h5") model.save(filepath) if "T5" in main_layer_class.__name__: model = keras.models.load_model( filepath, custom_objects={ main_layer_class.__name__: main_layer_class, "TFSharedEmbeddings": TFSharedEmbeddings, }, ) else: model = keras.models.load_model( filepath, custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(model, keras.Model) model(inputs_dict) @slow def test_graph_mode_with_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:2]: model = model_class(config) inputs = copy.deepcopy(inputs_dict) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) if not self.is_encoder_decoder: inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids) else: inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids) inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids) inputs = self._prepare_for_class(inputs, model_class) @tf.function def run_in_graph_mode(): return model(inputs) outputs = run_in_graph_mode() self.assertIsNotNone(outputs) def _generate_random_bad_tokens(self, num_bad_tokens, model): # special tokens cannot be bad tokens special_tokens = [] if model.config.bos_token_id is not None: special_tokens.append(model.config.bos_token_id) if model.config.pad_token_id is not None: special_tokens.append(model.config.pad_token_id) if model.config.eos_token_id is not None: special_tokens.append(model.config.eos_token_id) # create random 
bad tokens that are not special tokens bad_tokens = [] while len(bad_tokens) < num_bad_tokens: token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0] if token not in special_tokens: bad_tokens.append(token) return bad_tokens def _check_generated_ids(self, output_ids): for token_id in output_ids[0].numpy().tolist(): self.assertGreaterEqual(token_id, 0) self.assertLess(token_id, self.model_tester.vocab_size) def _check_match_tokens(self, generated_ids, bad_words_ids): # for all bad word tokens for bad_word_ids in bad_words_ids: # for all slices in batch for generated_ids_slice in generated_ids: # for all word idx for i in range(len(bad_word_ids), len(generated_ids_slice)): # if tokens match if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids: return True return False
transformers/tests/utils/test_modeling_tf_core.py/0
{ "file_path": "transformers/tests/utils/test_modeling_tf_core.py", "repo_id": "transformers", "token_count": 9190 }
412
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This script is responsible for making sure the dummies in utils/dummies_xxx.py are up to date with the main init. Why dummies? This is to make sure that a user can always import all objects from `transformers`, even if they don't have the necessary extra libs installed. Those objects will then raise helpful error message whenever the user tries to access one of their methods. Usage (from the root of the repo): Check that the dummy files are up to date (used in `make repo-consistency`): ```bash python utils/check_dummies.py ``` Update the dummy files if needed (used in `make fix-copies`): ```bash python utils/check_dummies.py --fix_and_overwrite ``` """ import argparse import os import re from typing import Dict, List, Optional # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py PATH_TO_TRANSFORMERS = "src/transformers" # Matches is_xxx_available() _re_backend = re.compile(r"is\_([a-z_]*)_available()") # Matches from xxx import bla _re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n") # Matches if not is_xxx_available() _re_test_backend = re.compile(r"^\s+if\s+not\s+\(?is\_[a-z_]*\_available\(\)") # Template for the dummy objects. DUMMY_CONSTANT = """ {0} = None """ DUMMY_CLASS = """ class {0}(metaclass=DummyObject): _backends = {1} def __init__(self, *args, **kwargs): requires_backends(self, {1}) """ DUMMY_FUNCTION = """ def {0}(*args, **kwargs): requires_backends({0}, {1}) """ def find_backend(line: str) -> Optional[str]: """ Find one (or multiple) backend in a code line of the init. Args: line (`str`): A code line in an init file. Returns: Optional[`str`]: If one (or several) backend is found, returns it. In the case of multiple backends (the line contains `if is_xxx_available() and `is_yyy_available()`) returns all backends joined on `_and_` (so `xxx_and_yyy` for instance). """ if _re_test_backend.search(line) is None: return None backends = [b[0] for b in _re_backend.findall(line)] backends.sort() return "_and_".join(backends) def read_init() -> Dict[str, List[str]]: """ Read the init and extract backend-specific objects. Returns: Dict[str, List[str]]: A dictionary mapping backend name to the list of object names requiring that backend. """ with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() # Get to the point we do the actual imports for type checking line_index = 0 while not lines[line_index].startswith("if TYPE_CHECKING"): line_index += 1 backend_specific_objects = {} # Go through the end of the file while line_index < len(lines): # If the line is an if is_backend_available, we grab all objects associated. 
backend = find_backend(lines[line_index]) if backend is not None: while not lines[line_index].startswith(" else:"): line_index += 1 line_index += 1 objects = [] # Until we unindent, add backend objects to the list while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8): line = lines[line_index] single_line_import_search = _re_single_line_import.search(line) if single_line_import_search is not None: # Single-line imports objects.extend(single_line_import_search.groups()[0].split(", ")) elif line.startswith(" " * 12): # Multiple-line imports (with 3 indent level) objects.append(line[12:-2]) line_index += 1 backend_specific_objects[backend] = objects else: line_index += 1 return backend_specific_objects def create_dummy_object(name: str, backend_name: str) -> str: """ Create the code for a dummy object. Args: name (`str`): The name of the object. backend_name (`str`): The name of the backend required for that object. Returns: `str`: The code of the dummy object. """ if name.isupper(): return DUMMY_CONSTANT.format(name) elif name.islower(): return DUMMY_FUNCTION.format(name, backend_name) else: return DUMMY_CLASS.format(name, backend_name) def create_dummy_files(backend_specific_objects: Optional[Dict[str, List[str]]] = None) -> Dict[str, str]: """ Create the content of the dummy files. Args: backend_specific_objects (`Dict[str, List[str]]`, *optional*): The mapping backend name to list of backend-specific objects. If not passed, will be obtained by calling `read_init()`. Returns: `Dict[str, str]`: A dictionary mapping backend name to code of the corresponding backend file. """ if backend_specific_objects is None: backend_specific_objects = read_init() dummy_files = {} for backend, objects in backend_specific_objects.items(): backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]" dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects]) dummy_files[backend] = dummy_file return dummy_files def check_dummies(overwrite: bool = False): """ Check if the dummy files are up to date and maybe `overwrite` with the right content. Args: overwrite (`bool`, *optional*, default to `False`): Whether or not to overwrite the content of the dummy files. Will raise an error if they are not up to date when `overwrite=False`. """ dummy_files = create_dummy_files() # For special correspondence backend name to shortcut as used in utils/dummy_xxx_objects.py short_names = {"torch": "pt"} # Locate actual dummy modules and read their content. path = os.path.join(PATH_TO_TRANSFORMERS, "utils") dummy_file_paths = { backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py") for backend in dummy_files.keys() } actual_dummies = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(file_path): with open(file_path, "r", encoding="utf-8", newline="\n") as f: actual_dummies[backend] = f.read() else: actual_dummies[backend] = "" # Compare actual with what they should be. for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main " "__init__ has new objects." 
) with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f: f.write(dummy_files[backend]) else: raise ValueError( "The main __init__ has objects that are not present in " f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` " "to fix this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") args = parser.parse_args() check_dummies(args.fix_and_overwrite)
transformers/utils/check_dummies.py/0
{ "file_path": "transformers/utils/check_dummies.py", "repo_id": "transformers", "token_count": 3338 }
413
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility that sorts the names in the auto mappings defines in the auto modules in alphabetical order. Use from the root of the repo with: ```bash python utils/sort_auto_mappings.py ``` to auto-fix all the auto mappings (used in `make style`). To only check if the mappings are properly sorted (as used in `make quality`), do: ```bash python utils/sort_auto_mappings.py --check_only ``` """ import argparse import os import re from typing import Optional # Path are set with the intent you should run this script from the root of the repo. PATH_TO_AUTO_MODULE = "src/transformers/models/auto" # re pattern that matches mapping introductions: # SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict _re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict") # re pattern that matches identifiers in mappings _re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"') def sort_auto_mapping(fname: str, overwrite: bool = False) -> Optional[bool]: """ Sort all auto mappings in a file. Args: fname (`str`): The name of the file where we want to sort auto-mappings. overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file. Returns: `Optional[bool]`: Returns `None` if `overwrite=True`. Otherwise returns `True` if the file has an auto-mapping improperly sorted, `False` if the file is okay. """ with open(fname, "r", encoding="utf-8") as f: content = f.read() lines = content.split("\n") new_lines = [] line_idx = 0 while line_idx < len(lines): if _re_intro_mapping.search(lines[line_idx]) is not None: # Start of a new mapping! indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8 while not lines[line_idx].startswith(" " * indent + "("): new_lines.append(lines[line_idx]) line_idx += 1 blocks = [] while lines[line_idx].strip() != "]": # Blocks either fit in one line or not if lines[line_idx].strip() == "(": start_idx = line_idx while not lines[line_idx].startswith(" " * indent + ")"): line_idx += 1 blocks.append("\n".join(lines[start_idx : line_idx + 1])) else: blocks.append(lines[line_idx]) line_idx += 1 # Sort blocks by their identifiers blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0]) new_lines += blocks else: new_lines.append(lines[line_idx]) line_idx += 1 if overwrite: with open(fname, "w", encoding="utf-8") as f: f.write("\n".join(new_lines)) else: return "\n".join(new_lines) != content def sort_all_auto_mappings(overwrite: bool = False): """ Sort all auto mappings in the library. Args: overwrite (`bool`, *optional*, defaults to `False`): Whether or not to fix and overwrite the file. 
""" fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")] diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames] if not overwrite and any(diffs): failures = [f for f, d in zip(fnames, diffs) if d] raise ValueError( f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix" " this." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.") args = parser.parse_args() sort_all_auto_mappings(not args.check_only)
transformers/utils/sort_auto_mappings.py/0
{ "file_path": "transformers/utils/sort_auto_mappings.py", "repo_id": "transformers", "token_count": 1813 }
414
<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/trl_banner_dark.png">
</div>

# TRL - Transformer Reinforcement Learning
> Full stack library to fine-tune and align large language models.

<p align="center">
    <a href="https://github.com/huggingface/trl/blob/main/LICENSE">
        <img alt="License" src="https://img.shields.io/github/license/huggingface/trl.svg?color=blue">
    </a>
    <a href="https://huggingface.co/docs/trl/index">
        <img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/trl/index.svg?down_color=red&down_message=offline&up_message=online">
    </a>
    <a href="https://github.com/huggingface/trl/releases">
        <img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/trl.svg">
    </a>
</p>

## What is it?

The `trl` library is a full stack tool to fine-tune and align transformer language and diffusion models using methods such as Supervised Fine-Tuning (SFT), Reward Modeling (RM), Proximal Policy Optimization (PPO), and Direct Preference Optimization (DPO).

The library is built on top of the [`transformers`](https://github.com/huggingface/transformers) library and thus allows you to use any model architecture available there.

## Highlights

- **`Efficient and scalable`**:
    - [`accelerate`](https://github.com/huggingface/accelerate) is the backbone of `trl`, which allows you to scale model training from a single GPU to a large multi-node cluster with methods such as DDP and DeepSpeed.
    - [`PEFT`](https://github.com/huggingface/peft) is fully integrated and allows you to train even the largest models on modest hardware with quantization and methods such as LoRA or QLoRA (see the sketch after this list).
    - [`unsloth`](https://github.com/unslothai/unsloth) is also integrated and allows you to significantly speed up training with dedicated kernels.
- **`CLI`**: With the [CLI](https://huggingface.co/docs/trl/clis) you can fine-tune and chat with LLMs without writing any code, using a single command and a flexible config system.
- **`Trainers`**: The Trainer classes are an abstraction that makes it easy to apply many fine-tuning methods, such as the [`SFTTrainer`](https://huggingface.co/docs/trl/sft_trainer), [`DPOTrainer`](https://huggingface.co/docs/trl/trainer#trl.DPOTrainer), [`RewardTrainer`](https://huggingface.co/docs/trl/reward_trainer), [`PPOTrainer`](https://huggingface.co/docs/trl/trainer#trl.PPOTrainer), [`CPOTrainer`](https://huggingface.co/docs/trl/trainer#trl.CPOTrainer), and [`ORPOTrainer`](https://huggingface.co/docs/trl/trainer#trl.ORPOTrainer).
- **`AutoModels`**: The [`AutoModelForCausalLMWithValueHead`](https://huggingface.co/docs/trl/models#trl.AutoModelForCausalLMWithValueHead) & [`AutoModelForSeq2SeqLMWithValueHead`](https://huggingface.co/docs/trl/models#trl.AutoModelForSeq2SeqLMWithValueHead) classes add an additional value head to the model, which allows you to train them with RL algorithms such as PPO.
- **`Examples`**: Train GPT2 to generate positive movie reviews with a BERT sentiment classifier, full RLHF using adapters only, train GPT-j to be less toxic, the [StackLlama example](https://huggingface.co/blog/stackllama), etc., following the [examples](https://github.com/huggingface/trl/tree/main/examples).
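As a concrete illustration of the PEFT integration, here is a minimal LoRA sketch using the `SFTTrainer`'s `peft_config` hook. The hyperparameter values are illustrative only, and the snippet assumes the `peft` and `datasets` packages are installed alongside `trl`:

```python
from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer

# a small text dataset to fine-tune on
dataset = load_dataset("imdb", split="train")

# LoRA freezes the base model and trains low-rank adapter matrices instead,
# which drastically reduces the number of trainable parameters
peft_config = LoraConfig(
    r=16,  # rank of the adapter matrices (illustrative value)
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=512,
    peft_config=peft_config,  # TRL wraps the model with the adapters for you
)
trainer.train()
```

Compared to full fine-tuning, only the adapter weights are updated, so the memory footprint stays close to what inference alone would require.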
## Installation

### Python package

Install the library with `pip`:

```bash
pip install trl
```

### From source

If you want to use the latest features before an official release, you can install from source:

```bash
pip install git+https://github.com/huggingface/trl.git
```

### Repository

If you want to use the examples, you can clone the repository with the following command:

```bash
git clone https://github.com/huggingface/trl.git
```

## Command Line Interface (CLI)

You can use the TRL Command Line Interface (CLI) to quickly get started with Supervised Fine-Tuning (SFT) and Direct Preference Optimization (DPO), and to test your aligned model with the chat CLI:

**SFT:**

```bash
trl sft --model_name_or_path facebook/opt-125m --dataset_name imdb --output_dir opt-sft-imdb
```

**DPO:**

```bash
trl dpo --model_name_or_path facebook/opt-125m --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style --output_dir opt-sft-hh-rlhf
```

**Chat:**

```bash
trl chat --model_name_or_path Qwen/Qwen1.5-0.5B-Chat
```

Read more about the CLI in the [relevant documentation section](https://huggingface.co/docs/trl/main/en/clis) or use `--help` for more details.

## How to use

For more flexibility and control over the training, you can use the dedicated trainer classes to fine-tune the model in Python.

### `SFTTrainer`

This is a basic example of how to use the `SFTTrainer` from the library. The `SFTTrainer` is a light wrapper around the `transformers` Trainer to easily fine-tune language models or adapters on a custom dataset.

```python
# imports
from datasets import load_dataset
from trl import SFTTrainer

# get dataset
dataset = load_dataset("imdb", split="train")

# get trainer
trainer = SFTTrainer(
    "facebook/opt-350m",
    train_dataset=dataset,
    dataset_text_field="text",
    max_seq_length=512,
)

# train
trainer.train()
```

### `RewardTrainer`

This is a basic example of how to use the `RewardTrainer` from the library. The `RewardTrainer` is a wrapper around the `transformers` Trainer to easily fine-tune reward models or adapters on a custom preference dataset.

```python
# imports
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from trl import RewardTrainer

# load model and dataset - dataset needs to be in a specific format
model = AutoModelForSequenceClassification.from_pretrained("gpt2", num_labels=1)
tokenizer = AutoTokenizer.from_pretrained("gpt2")

...

# load trainer
trainer = RewardTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
)

# train
trainer.train()
```

### `PPOTrainer`

This is a basic example of how to use the `PPOTrainer` from the library. Based on a query, the language model creates a response, which is then evaluated. The evaluation could be a human in the loop or another model's output.
```python
# imports
import torch
from transformers import AutoTokenizer
from trl import PPOTrainer, PPOConfig, AutoModelForCausalLMWithValueHead, create_reference_model
from trl.core import respond_to_batch

# get models
model = AutoModelForCausalLMWithValueHead.from_pretrained('gpt2')
ref_model = create_reference_model(model)

tokenizer = AutoTokenizer.from_pretrained('gpt2')
tokenizer.pad_token = tokenizer.eos_token

# initialize trainer
ppo_config = PPOConfig(batch_size=1, mini_batch_size=1)

# encode a query
query_txt = "This morning I went to the "
query_tensor = tokenizer.encode(query_txt, return_tensors="pt")

# get model response
response_tensor = respond_to_batch(model, query_tensor)

# create a ppo trainer
ppo_trainer = PPOTrainer(ppo_config, model, ref_model, tokenizer)

# define a reward for response
# (this could be any reward such as human feedback or output from another model)
reward = [torch.tensor(1.0)]

# train model for one step with ppo
train_stats = ppo_trainer.step([query_tensor[0]], [response_tensor[0]], reward)
```

### `DPOTrainer`

`DPOTrainer` is a trainer that uses the [Direct Preference Optimization algorithm](https://huggingface.co/papers/2305.18290). This is a basic example of how to use the `DPOTrainer` from the library. The `DPOTrainer` is a wrapper around the `transformers` Trainer to easily fine-tune language models or adapters on a custom preference dataset.

```python
# imports
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOTrainer

# load model and dataset - dataset needs to be in a specific format
model = AutoModelForCausalLM.from_pretrained("gpt2")
tokenizer = AutoTokenizer.from_pretrained("gpt2")

...

# load trainer
trainer = DPOTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset,
)

# train
trainer.train()
```

## Development

If you want to contribute to `trl` or customize it to your needs, make sure to read the [contribution guide](https://github.com/huggingface/trl/blob/main/CONTRIBUTING.md) and do a dev install:

```bash
git clone https://github.com/huggingface/trl.git
cd trl/
make dev
```

## References

### Proximal Policy Optimisation

The PPO implementation largely follows the structure introduced in the paper **"Fine-Tuning Language Models from Human Preferences"** by D. Ziegler et al. \[[paper](https://huggingface.co/papers/1909.08593), [code](https://github.com/openai/lm-human-preferences)].

### Direct Preference Optimization

DPO is based on the original implementation of **"Direct Preference Optimization: Your Language Model is Secretly a Reward Model"** by E. Mitchell et al. \[[paper](https://huggingface.co/papers/2305.18290), [code](https://github.com/eric-mitchell/direct-preference-optimization)]

## Citation

```bibtex
@misc{vonwerra2022trl,
  author = {Leandro von Werra and Younes Belkada and Lewis Tunstall and Edward Beeching and Tristan Thrush and Nathan Lambert and Shengyi Huang},
  title = {TRL: Transformer Reinforcement Learning},
  year = {2020},
  publisher = {GitHub},
  journal = {GitHub repository},
  howpublished = {\url{https://github.com/huggingface/trl}}
}
```
trl/README.md/0
{ "file_path": "trl/README.md", "repo_id": "trl", "token_count": 3018 }
415
# Installation

You can install TRL either from PyPI or from source:

## PyPI

Install the library with pip:

```bash
pip install trl
```

## Source

You can also install the latest version from source. First clone the repo and then run the installation with `pip`:

```bash
git clone https://github.com/huggingface/trl.git
cd trl/
pip install -e .
```

If you want the development install, you can replace the pip install with the following:

```bash
pip install -e ".[dev]"
```
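To check that the install worked, you can import the package and print its version. This is a minimal sanity check, assuming the package exposes a `__version__` attribute as Hugging Face libraries typically do:

```python
import trl

# prints the installed TRL version, e.g. after a pip or source install
print(trl.__version__)
```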
trl/docs/source/installation.mdx/0
{ "file_path": "trl/docs/source/installation.mdx", "repo_id": "trl", "token_count": 147 }
416
# Sentiment Tuning Examples

The notebooks and scripts in these examples show how to fine-tune a model with a sentiment classifier (such as `lvwerra/distilbert-imdb`).

Here's an overview of the notebooks and scripts in the [trl repository](https://github.com/huggingface/trl/tree/main/examples):

| File | Description |
|------|-------------|
| [`examples/scripts/ppo.py`](https://github.com/huggingface/trl/blob/main/examples/scripts/ppo.py) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/trl/blob/main/examples/sentiment/notebooks/gpt2-sentiment.ipynb) | This script shows how to use the `PPOTrainer` to fine-tune a sentiment analysis model using the IMDB dataset. |
| [`examples/notebooks/gpt2-sentiment.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-sentiment.ipynb) | This notebook demonstrates how to reproduce the GPT2 IMDB sentiment tuning example in a Jupyter notebook. |
| [`examples/notebooks/gpt2-control.ipynb`](https://github.com/huggingface/trl/tree/main/examples/notebooks/gpt2-control.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/trl/blob/main/examples/sentiment/notebooks/gpt2-sentiment-control.ipynb) | This notebook demonstrates how to reproduce the GPT2 sentiment control example in a Jupyter notebook. |

## Usage

```bash
# 1. run directly
python examples/scripts/ppo.py

# 2. run via `accelerate` (recommended), enabling more features (e.g., multiple GPUs, deepspeed)
accelerate config # will prompt you to define the training configuration
accelerate launch examples/scripts/ppo.py # launches training

# 3. get help text and documentation
python examples/scripts/ppo.py --help

# 4. configure logging with wandb and, say, mini_batch_size=1 and gradient_accumulation_steps=16
python examples/scripts/ppo.py --log_with wandb --mini_batch_size 1 --gradient_accumulation_steps 16
```

Note: if you don't want to log with `wandb`, remove `log_with="wandb"` in the scripts/notebooks. You can also replace it with your favourite experiment tracker that's [supported by `accelerate`](https://huggingface.co/docs/accelerate/usage_guides/tracking).

## A few notes on multi-GPU

To run in a multi-GPU setup with DDP (Distributed Data Parallel), change the `device_map` value to `device_map={"": Accelerator().process_index}` and make sure to run your script with `accelerate launch yourscript.py`. If you want to apply naive pipeline parallelism, you can use `device_map="auto"` instead. A minimal sketch of the DDP setup is included at the end of this page.

## Benchmarks

Below are some benchmark results for `examples/scripts/ppo.py`. To reproduce locally, please check out the `--command` arguments below.
```bash
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --log_with wandb" \
    --num-seeds 5 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
```

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/v0.4.7-55-g110e672/sentiment.png)

## With and without gradient accumulation

```bash
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --exp_name sentiment_tuning_step_grad_accu --mini_batch_size 1 --gradient_accumulation_steps 128 --log_with wandb" \
    --num-seeds 5 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
```

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/v0.4.7-55-g110e672/gradient_accu.png)

## Comparing different models (gpt2, gpt2-xl, falcon, llama2)

```bash
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --exp_name sentiment_tuning_gpt2 --log_with wandb" \
    --num-seeds 5 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --exp_name sentiment_tuning_gpt2xl_grad_accu --model_name gpt2-xl --mini_batch_size 16 --gradient_accumulation_steps 8 --log_with wandb" \
    --num-seeds 5 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --exp_name sentiment_tuning_falcon_rw_1b --model_name tiiuae/falcon-rw-1b --log_with wandb" \
    --num-seeds 5 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
```

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/v0.4.7-55-g110e672/different_models.png)

## With and without PEFT

```bash
python benchmark/benchmark.py \
    --command "python examples/scripts/ppo.py --exp_name sentiment_tuning_peft --use_peft --log_with wandb" \
    --num-seeds 5 \
    --start-seed 1 \
    --workers 10 \
    --slurm-nodes 1 \
    --slurm-gpus-per-task 1 \
    --slurm-ntasks 1 \
    --slurm-total-cpus 12 \
    --slurm-template-path benchmark/trl.slurm_template
```

![](https://huggingface.co/datasets/trl-internal-testing/example-images/resolve/main/images/benchmark/v0.4.7-55-g110e672/peft.png)
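As mentioned in the multi-GPU notes above, here is a minimal sketch of loading a model with one full copy per process for DDP training (the model name is an illustrative assumption):

```python
from accelerate import Accelerator
from trl import AutoModelForCausalLMWithValueHead

# Each DDP process loads its own full copy of the model on its assigned device.
model = AutoModelForCausalLMWithValueHead.from_pretrained(
    "gpt2",  # illustrative model name
    device_map={"": Accelerator().process_index},
)
```

Run the script with `accelerate launch yourscript.py` so that `process_index` resolves to the right GPU on every process.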
trl/docs/source/sentiment_tuning.mdx/0
{ "file_path": "trl/docs/source/sentiment_tuning.mdx", "repo_id": "trl", "token_count": 2400 }
417
import sys
from dataclasses import dataclass, field
from typing import Optional

from datasets import load_dataset
from huggingface_hub import HfApi
from huggingface_hub.repocard import RepoCard
from transformers import HfArgumentParser


"""
# debug
python -i examples/datasets/tldr_preference.py --debug --push_to_hub
# actual push
python examples/datasets/tldr_preference.py --push_to_hub --hf_entity trl-internal-testing
"""


api = HfApi()


@dataclass
class ScriptArguments:
    debug: Optional[bool] = field(default=False, metadata={"help": "Enable debug mode"})
    hf_entity: Optional[str] = field(default=None, metadata={"help": "The Hugging Face entity to use"})
    hf_repo_id: Optional[str] = field(
        default="tldr-preference-trl-style", metadata={"help": "The Hugging Face repository ID"}
    )
    sft_hf_repo_id: Optional[str] = field(
        default="tldr-preference-sft-trl-style", metadata={"help": "The Hugging Face repository ID"}
    )
    revision: Optional[str] = field(default="0.1.0", metadata={"help": "The revision of the repository"})
    update_main_revision: Optional[bool] = field(
        default=True, metadata={"help": "Update the main revision of the repository"}
    )
    push_to_hub: Optional[bool] = field(default=False, metadata={"help": "Push the dataset to the Hugging Face Hub"})
    dataset_num_proc: Optional[int] = field(
        default=None, metadata={"help": "The number of workers to use to tokenize the data"}
    )


if __name__ == "__main__":
    args = HfArgumentParser(ScriptArguments).parse_args_into_dataclasses()[0]
    if args.hf_entity is None:
        args.hf_entity = api.whoami()["name"]
    full_repo_id = f"{args.hf_entity}/{args.hf_repo_id}"
    full_sft_repo_id = f"{args.hf_entity}/{args.sft_hf_repo_id}"

    ################
    # Preference dataset
    ################
    ds = load_dataset("openai/summarize_from_feedback", "comparisons")
    if args.debug:
        for key in ds:
            ds[key] = ds[key].select(range(50))
    cnndm_batches = ["batch0_cnndm", "cnndm0", "cnndm2"]
    if not args.debug:
        ds["validation_cnndm"] = ds["validation"].filter(
            lambda x: x["batch"] in cnndm_batches, num_proc=args.dataset_num_proc
        )
        ds["validation"] = ds["validation"].filter(
            lambda x: x["batch"] not in cnndm_batches, num_proc=args.dataset_num_proc
        )

    tldr_format_str = "SUBREDDIT: r/{subreddit}\n\nTITLE: {title}\n\nPOST: {post}\n\nTL;DR:"
    cnndm_format_str = "Article:\n{article}\n\nTL;DR:"

    def process(row):
        format_str = cnndm_format_str if row["batch"] in cnndm_batches else tldr_format_str
        row["prompt"] = format_str.format(**row["info"])
        choice = row["choice"]
        # need to remove the leading space
        chosen = row["summaries"][choice]["text"].strip()
        rejected = row["summaries"][1 - choice]["text"].strip()
        row["chosen"] = [{"role": "user", "content": row["prompt"]}, {"role": "assistant", "content": chosen}]
        row["rejected"] = [{"role": "user", "content": row["prompt"]}, {"role": "assistant", "content": rejected}]
        return row

    ds = ds.map(process, num_proc=args.dataset_num_proc)
    for key in ds:  # reorder columns
        ds[key] = ds[key].select_columns(
            ["prompt", "chosen", "rejected", "info", "summaries", "choice", "worker", "batch", "split", "extra"]
        )
    if args.push_to_hub:
        revisions = ["main"] if args.update_main_revision else []
        revisions.append(args.revision)

        # get the command used to run the script
        run_command = " ".join(["python"] + sys.argv)

        for revision in revisions:
            ds.push_to_hub(full_repo_id, revision=revision)
            repo_full_url = f"https://huggingface.co/datasets/{full_repo_id}/tree/{revision}"

            # get the name of the current file
            file_name = __file__.split("/")[-1]
            api.upload_file(
                path_or_fileobj=__file__,
                path_in_repo=file_name,
                revision=revision,
                repo_id=full_repo_id,
                repo_type="dataset",
            )

        preference_card = RepoCard.load(
            full_repo_id,
            repo_type="dataset",
        )
        preference_card.text = f"""\
# TRL's TL;DR Preference Dataset

We preprocess the dataset using our standard `prompt, chosen, rejected` format.

## Source of the dataset

We take the dataset from https://huggingface.co/datasets/openai/summarize_from_feedback.

## Reproduce this dataset

1. Download the `{file_name}` from the {repo_full_url}.
2. Run `{run_command}`
"""
        preference_card.push_to_hub(
            full_repo_id,
            repo_type="dataset",
        )

    ################
    # SFT dataset
    ################
    sft_ds = load_dataset("vwxyzjn/summarize_from_feedback_tldr_3_filtered")
    if args.debug:
        for key in sft_ds:
            sft_ds[key] = sft_ds[key].select(range(50))

    def sft_process(row):
        row["prompt"] = tldr_format_str.format(**row)
        row["messages"] = [
            {"role": "user", "content": row["prompt"]},
            {"role": "assistant", "content": row["summary"]},
        ]
        return row

    sft_ds = sft_ds.map(sft_process, num_proc=args.dataset_num_proc)
    for key in sft_ds:  # reorder columns
        sft_ds[key] = sft_ds[key].select_columns(["prompt", "messages", "id", "subreddit", "title", "post", "summary"])
    if args.push_to_hub:
        revisions = ["main"] if args.update_main_revision else []
        revisions.append(args.revision)

        # get the command used to run the script
        run_command = " ".join(["python"] + sys.argv)

        for revision in revisions:
            sft_ds.push_to_hub(full_sft_repo_id, revision=revision)
            repo_full_url = f"https://huggingface.co/datasets/{full_sft_repo_id}/tree/{revision}"

            # get the name of the current file
            file_name = __file__.split("/")[-1]
            api.upload_file(
                path_or_fileobj=__file__,
                path_in_repo=file_name,
                revision=revision,
                repo_id=full_sft_repo_id,
                repo_type="dataset",
            )

        sft_card = RepoCard.load(
            full_sft_repo_id,
            repo_type="dataset",
        )
        sft_card.text = f"""\
# TRL's TL;DR SFT Dataset

We preprocess the dataset using our standard `prompt, messages` format.

## Source of the dataset

We take the dataset from https://huggingface.co/datasets/vwxyzjn/summarize_from_feedback_tldr_3_filtered.

## Reproduce this dataset

1. Download the `{file_name}` from the {repo_full_url}.
2. Run `{run_command}`
"""
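
# Illustrative sketch of a preference row after `process` has run (field contents are
# assumptions, abbreviated for readability; they are not taken from the real dataset):
#   {
#       "prompt": "SUBREDDIT: r/...\n\nTITLE: ...\n\nPOST: ...\n\nTL;DR:",
#       "chosen": [{"role": "user", "content": "<prompt>"}, {"role": "assistant", "content": "<preferred summary>"}],
#       "rejected": [{"role": "user", "content": "<prompt>"}, {"role": "assistant", "content": "<other summary>"}],
#   }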
trl/examples/datasets/tldr_preference.py/0
{ "file_path": "trl/examples/datasets/tldr_preference.py", "repo_id": "trl", "token_count": 3040 }
418
# Fine-Tune Llama2-7b on SE paired dataset import os from dataclasses import dataclass, field from typing import Optional import torch from accelerate import Accelerator from datasets import load_dataset from peft import AutoPeftModelForCausalLM, LoraConfig from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser, set_seed, ) from trl import SFTConfig, SFTTrainer from trl.import_utils import is_npu_available, is_xpu_available from trl.trainer import ConstantLengthDataset @dataclass class ScriptArguments: model_name: Optional[str] = field(default="meta-llama/Llama-2-7b-hf", metadata={"help": "the model name"}) dataset_name: Optional[str] = field(default="lvwerra/stack-exchange-paired", metadata={"help": "the dataset name"}) subset: Optional[str] = field(default="data/finetune", metadata={"help": "the subset to use"}) split: Optional[str] = field(default="train", metadata={"help": "the split to use"}) size_valid_set: Optional[int] = field(default=4000, metadata={"help": "the size of the validation set"}) streaming: Optional[bool] = field(default=True, metadata={"help": "whether to stream the dataset"}) shuffle_buffer: Optional[int] = field(default=5000, metadata={"help": "the shuffle buffer size"}) seq_length: Optional[int] = field(default=1024, metadata={"help": "the sequence length"}) num_workers: Optional[int] = field(default=4, metadata={"help": "the number of workers"}) use_bnb: Optional[bool] = field(default=True, metadata={"help": "whether to use BitsAndBytes"}) # LoraConfig lora_alpha: Optional[float] = field(default=16, metadata={"help": "the lora alpha parameter"}) lora_dropout: Optional[float] = field(default=0.05, metadata={"help": "the lora dropout parameter"}) lora_r: Optional[int] = field(default=8, metadata={"help": "the lora r parameter"}) parser = HfArgumentParser((ScriptArguments, SFTConfig)) script_args, training_args = parser.parse_args_into_dataclasses() peft_config = LoraConfig( r=script_args.lora_r, lora_alpha=script_args.lora_alpha, lora_dropout=script_args.lora_dropout, target_modules=["q_proj", "v_proj"], bias="none", task_type="CAUSAL_LM", ) if training_args.group_by_length and training_args.packing: raise ValueError("Cannot use both packing and group by length") # `gradient_checkpointing` was True by default until `1f3314`, but it's actually not used. # `gradient_checkpointing=True` will cause `Variable._execution_engine.run_backward`. if training_args.gradient_checkpointing: raise ValueError("gradient_checkpointing not supported") set_seed(training_args.seed) def chars_token_ratio(dataset, tokenizer, nb_examples=400): """ Estimate the average number of characters per token in the dataset. """ total_characters, total_tokens = 0, 0 for _, example in tqdm(zip(range(nb_examples), iter(dataset)), total=nb_examples): text = prepare_sample_text(example) total_characters += len(text) if tokenizer.is_fast: total_tokens += len(tokenizer(text).tokens()) else: total_tokens += len(tokenizer.tokenize(text)) return total_characters / total_tokens def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def prepare_sample_text(example): """Prepare the text from a sample of the dataset.""" text = f"Question: {example['question']}\n\nAnswer: {example['response_j']}" return text def create_datasets(tokenizer, args, seed=None): dataset = load_dataset( args.dataset_name, data_dir=args.subset, split=args.split, use_auth_token=True, num_proc=args.num_workers if not args.streaming else None, streaming=args.streaming, ) if args.streaming: print("Loading the dataset in streaming mode") valid_data = dataset.take(args.size_valid_set) train_data = dataset.skip(args.size_valid_set) train_data = train_data.shuffle(buffer_size=args.shuffle_buffer, seed=seed) else: dataset = dataset.train_test_split(test_size=0.005, seed=seed) train_data = dataset["train"] valid_data = dataset["test"] print(f"Size of the train set: {len(train_data)}. Size of the validation set: {len(valid_data)}") chars_per_token = chars_token_ratio(train_data, tokenizer) print(f"The character to token ratio of the dataset is: {chars_per_token:.2f}") train_dataset = ConstantLengthDataset( tokenizer, train_data, formatting_func=prepare_sample_text, infinite=True, seq_length=args.seq_length, chars_per_token=chars_per_token, ) valid_dataset = ConstantLengthDataset( tokenizer, valid_data, formatting_func=prepare_sample_text, infinite=False, seq_length=args.seq_length, chars_per_token=chars_per_token, ) return train_dataset, valid_dataset bnb_config = None if script_args.use_bnb: bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, ) base_model = AutoModelForCausalLM.from_pretrained( script_args.model_name, quantization_config=bnb_config, device_map={"": Accelerator().local_process_index}, trust_remote_code=True, use_auth_token=True, ) base_model.config.use_cache = False tokenizer = AutoTokenizer.from_pretrained(script_args.model_name, trust_remote_code=True) tokenizer.pad_token = tokenizer.eos_token tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training train_dataset, eval_dataset = create_datasets(tokenizer, script_args, seed=training_args.seed) trainer = SFTTrainer( model=base_model, train_dataset=train_dataset, eval_dataset=eval_dataset, peft_config=peft_config, max_seq_length=None, formatting_func=prepare_sample_text, tokenizer=tokenizer, args=training_args, ) trainer.train() trainer.save_model(training_args.output_dir) output_dir = os.path.join(training_args.output_dir, "final_checkpoint") trainer.model.save_pretrained(output_dir) # Free memory for merging weights del base_model if is_xpu_available(): torch.xpu.empty_cache() elif is_npu_available(): torch.npu.empty_cache() else: torch.cuda.empty_cache() model = AutoPeftModelForCausalLM.from_pretrained(output_dir, device_map="auto", torch_dtype=torch.bfloat16) model = model.merge_and_unload() output_merged_dir = os.path.join(training_args.output_dir, "final_merged_checkpoint") model.save_pretrained(output_merged_dir, safe_serialization=True)
trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/sft_llama2.py", "repo_id": "trl", "token_count": 2723 }
419
from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from transformers import HfArgumentParser from vllm import LLM, SamplingParams from trl import HfPairwiseJudge, OpenAIPairwiseJudge """ Examples: python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --num_examples 1000 Model win rate: 31.40% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-3.5-turbo-0125 --num_examples 1000 Model win rate: 51.60% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/rloo_tldr --judge_model gpt-4o-mini --num_examples 1000 Model win rate: 51.20% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --num_examples 1000 Model win rate: 46.30% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-3.5-turbo-0125 --num_examples 1000 Model win rate: 52.50% python examples/scripts/evals/judge_tldr.py --model_name_or_path vwxyzjn/ppo_tldr --judge_model gpt-4o-mini --num_examples 1000 Model win rate: 63.00% """ @dataclass class ScriptArguments: model_name_or_path: str = field(metadata={"help": "The model name or path to the model to evaluate."}) judge_model: str = field( default="meta-llama/Meta-Llama-3-70B-Instruct", metadata={ "help": "The model name or path to the model to use as a judge. E.g., 'gpt-3.5-turbo-0125', 'meta-llama/Meta-Llama-3-70B-Instruct'." }, ) num_examples: Optional[int] = field(default=None, metadata={"help": "The number of examples to evaluate."}) # Parse the arguments parser = HfArgumentParser(ScriptArguments) args = parser.parse_args_into_dataclasses()[0] # Load the dataset raw_dataset = load_dataset("trl-internal-testing/tldr-preference-sft-trl-style", split="test") if args.num_examples is not None: raw_dataset = raw_dataset.select(range(args.num_examples)) # Extract the prompts and reference completions prompts = raw_dataset["prompt"] reference_completions = [message[-1]["content"] for message in raw_dataset["messages"]] # Generate the model completions sampling_params = SamplingParams(temperature=0.0, top_p=0.95, max_tokens=200) # very generous max token length llm = LLM(model=args.model_name_or_path, tensor_parallel_size=1) outputs = llm.generate(prompts, sampling_params) model_completions = [output.outputs[0].text.strip() for output in outputs] # Judge the outputs if "gpt" in args.judge_model: judge = OpenAIPairwiseJudge(args.judge_model) else: judge = HfPairwiseJudge(args.judge_model) completions = [[c0, c1] for c0, c1 in zip(reference_completions, model_completions)] best_idxs = judge.judge(prompts, completions) model_win_rate = best_idxs.count(1) / len(best_idxs) print(f"Model win rate: {model_win_rate*100:.2f}%")
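
# Note: `judge.judge` returns, for each prompt, the index of the preferred completion in the
# corresponding pair. The reference completion sits at index 0 and the model completion at
# index 1, so counting the 1s yields the model's win rate against the reference summaries.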
trl/examples/scripts/evals/judge_tldr.py/0
{ "file_path": "trl/examples/scripts/evals/judge_tldr.py", "repo_id": "trl", "token_count": 1123 }
420
# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to close stale issues. Taken in part from the AllenNLP repository.
https://github.com/allenai/allennlp.
"""

import os
from datetime import datetime as dt
from datetime import timezone

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "feature request",
    "help wanted",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/trl")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        involved_users = [comment.user.login for comment in comments]
        inactive_days = (dt.now(timezone.utc) - issue.updated_at).days
        is_old = (dt.now(timezone.utc) - issue.created_at).days >= 30
        has_comments = len([user for user in involved_users if user != "github-actions[bot]"]) > 0
        to_exempt = any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())

        if is_old and not to_exempt:
            if has_comments and inactive_days > 23:
                issue.create_comment(
                    "This issue has been automatically marked as stale because it has not had "
                    "recent activity. If you think this still needs to be addressed "
                    "please comment on this thread.\n\n"
                )
            elif involved_users and involved_users[0] == "github-actions[bot]" and inactive_days > 7:
                issue.edit(state="closed")


if __name__ == "__main__":
    main()
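
# Lifecycle implied by the rules in `main()` (a summary comment, not part of the workflow itself):
#   - issue is >= 30 days old, has human activity, and has been inactive for > 23 days -> post the stale warning
#   - the latest comment is the bot's warning and the issue stays inactive for > 7 days -> close the issue
#   - issues carrying any label in LABELS_TO_EXEMPT are never touched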
trl/scripts/stale.py/0
{ "file_path": "trl/scripts/stale.py", "repo_id": "trl", "token_count": 806 }
421
# Copyright 2023 metric-space, The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import torch from trl import is_diffusers_available, is_peft_available from .testing_utils import require_diffusers if is_diffusers_available() and is_peft_available(): from trl import DDPOConfig, DDPOTrainer, DefaultDDPOStableDiffusionPipeline def scorer_function(images, prompts, metadata): return torch.randn(1) * 3.0, {} def prompt_function(): return ("cabbages", {}) @require_diffusers class DDPOTrainerTester(unittest.TestCase): """ Test the DDPOTrainer class. """ def setUp(self): self.ddpo_config = DDPOConfig( num_epochs=2, train_gradient_accumulation_steps=1, per_prompt_stat_tracking_buffer_size=32, sample_num_batches_per_epoch=2, sample_batch_size=2, mixed_precision=None, save_freq=1000000, ) pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" pretrained_revision = "main" pipeline = DefaultDDPOStableDiffusionPipeline( pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=False ) self.trainer = DDPOTrainer(self.ddpo_config, scorer_function, prompt_function, pipeline) return super().setUp() def tearDown(self) -> None: gc.collect() def test_loss(self): advantage = torch.tensor([-1.0]) clip_range = 0.0001 ratio = torch.tensor([1.0]) loss = self.trainer.loss(advantage, clip_range, ratio) assert loss.item() == 1.0 def test_generate_samples(self): samples, output_pairs = self.trainer._generate_samples(1, 2) assert len(samples) == 1 assert len(output_pairs) == 1 assert len(output_pairs[0][0]) == 2 def test_calculate_loss(self): samples, _ = self.trainer._generate_samples(1, 2) sample = samples[0] latents = sample["latents"][0, 0].unsqueeze(0) next_latents = sample["next_latents"][0, 0].unsqueeze(0) log_probs = sample["log_probs"][0, 0].unsqueeze(0) timesteps = sample["timesteps"][0, 0].unsqueeze(0) prompt_embeds = sample["prompt_embeds"] advantage = torch.tensor([1.0], device=prompt_embeds.device) assert latents.shape == (1, 4, 64, 64) assert next_latents.shape == (1, 4, 64, 64) assert log_probs.shape == (1,) assert timesteps.shape == (1,) assert prompt_embeds.shape == (2, 77, 32) loss, approx_kl, clipfrac = self.trainer.calculate_loss( latents, timesteps, next_latents, log_probs, advantage, prompt_embeds ) assert torch.isfinite(loss.cpu()) @require_diffusers class DDPOTrainerWithLoRATester(DDPOTrainerTester): """ Test the DDPOTrainer class. """ def setUp(self): self.ddpo_config = DDPOConfig( num_epochs=2, train_gradient_accumulation_steps=1, per_prompt_stat_tracking_buffer_size=32, sample_num_batches_per_epoch=2, sample_batch_size=2, mixed_precision=None, save_freq=1000000, ) pretrained_model = "hf-internal-testing/tiny-stable-diffusion-torch" pretrained_revision = "main" pipeline = DefaultDDPOStableDiffusionPipeline( pretrained_model, pretrained_model_revision=pretrained_revision, use_lora=True ) self.trainer = DDPOTrainer(self.ddpo_config, scorer_function, prompt_function, pipeline) return super().setUp()
trl/tests/test_ddpo_trainer.py/0
{ "file_path": "trl/tests/test_ddpo_trainer.py", "repo_id": "trl", "token_count": 1784 }
422
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import subprocess

import torch


def test():
    command = """\
python examples/scripts/rloo/rloo.py \
    --learning_rate 3e-6 \
    --output_dir models/minimal/rloo \
    --per_device_train_batch_size 4 \
    --gradient_accumulation_steps 1 \
    --total_episodes 10 \
    --model_name_or_path EleutherAI/pythia-14m \
    --non_eos_penalty \
    --save_strategy no \
    --stop_token eos \
"""
    if platform.system() == "Windows":
        # windows CI does not work with subprocesses for some reason
        # e.g., https://github.com/huggingface/trl/actions/runs/9600036224/job/26475286210?pr=1743
        return
    subprocess.run(
        command,
        shell=True,
        check=True,
    )


def test_rloo_reward():
    local_batch_size = 3
    rloo_k = 4
    # fmt: off
    rlhf_reward = torch.tensor([
        1, 2, 3, # first rlhf reward for three prompts
        2, 3, 4, # second rlhf reward for three prompts
        5, 6, 7, # third rlhf reward for three prompts
        8, 9, 10, # fourth rlhf reward for three prompts
    ]).float()
    # fmt: on

    # reference (loop) implementation of the leave-one-out baseline
    advantages = torch.zeros_like(rlhf_reward)
    for i in range(0, len(advantages), local_batch_size):
        other_response_rlhf_rewards = []
        for j in range(0, len(advantages), local_batch_size):
            if i != j:
                other_response_rlhf_rewards.append(rlhf_reward[j : j + local_batch_size])
        advantages[i : i + local_batch_size] = rlhf_reward[i : i + local_batch_size] - torch.stack(
            other_response_rlhf_rewards
        ).mean(0)
    assert abs(1 - (2 + 5 + 8) / 3 - advantages[0].item()) < 1e-6
    assert abs(6 - (3 + 2 + 9) / 3 - advantages[7].item()) < 1e-6

    # vectorized impl
    rlhf_reward = rlhf_reward.reshape(rloo_k, local_batch_size)
    baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1)
    vec_advantages = rlhf_reward - baseline
    torch.testing.assert_close(vec_advantages.flatten(), advantages)
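
# The quantity verified above, for reward r_i among k = rloo_k completions of the same prompt:
#   baseline_i  = (sum of the other k - 1 rewards) / (k - 1)
#   advantage_i = r_i - baseline_i
# i.e. each completion is baselined against the leave-one-out mean of its siblings, which is
# exactly what the vectorized implementation computes after the (k, batch) reshape.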
trl/tests/test_rloo_trainer.py/0
{ "file_path": "trl/tests/test_rloo_trainer.py", "repo_id": "trl", "token_count": 1073 }
423
import logging
from typing import Callable, Literal, Optional, Union

from datasets import Dataset, Value
from transformers import AutoTokenizer

from ..trainer.utils import ConstantLengthDataset


FORMAT_MAPPING = {
    "chatml": [{"content": Value(dtype="string", id=None), "role": Value(dtype="string", id=None)}],
    "instruction": {"completion": Value(dtype="string", id=None), "prompt": Value(dtype="string", id=None)},
}


def conversations_formatting_function(tokenizer: AutoTokenizer, messages_field: Literal["messages", "conversations"]):
    r"""
    Return a callable that takes a "messages"-style dataset and formats it by applying the
    tokenizer's chat template to each example.
    """

    def format_dataset(examples):
        if isinstance(examples[messages_field][0], list):
            output_texts = []
            for i in range(len(examples[messages_field])):
                output_texts.append(tokenizer.apply_chat_template(examples[messages_field][i], tokenize=False))
            return output_texts
        else:
            return tokenizer.apply_chat_template(examples[messages_field], tokenize=False)

    return format_dataset


def instructions_formatting_function(tokenizer: AutoTokenizer):
    r"""
    Return a callable that takes an "instruction"-style dataset (prompt/completion pairs),
    converts each example into a chat conversation, and formats it with the tokenizer's chat template.
    """

    def format_dataset(examples):
        if isinstance(examples["prompt"], list):
            output_texts = []
            for i in range(len(examples["prompt"])):
                converted_sample = [
                    {"role": "user", "content": examples["prompt"][i]},
                    {"role": "assistant", "content": examples["completion"][i]},
                ]
                output_texts.append(tokenizer.apply_chat_template(converted_sample, tokenize=False))
            return output_texts
        else:
            converted_sample = [
                {"role": "user", "content": examples["prompt"]},
                {"role": "assistant", "content": examples["completion"]},
            ]
            return tokenizer.apply_chat_template(converted_sample, tokenize=False)

    return format_dataset


def get_formatting_func_from_dataset(
    dataset: Union[Dataset, ConstantLengthDataset], tokenizer: AutoTokenizer
) -> Optional[Callable]:
    r"""
    Finds the correct formatting function based on the dataset structure. Currently supported datasets are:
    - `ChatML` with [{"role": str, "content": str}]
    - `instruction` with {"prompt": str, "completion": str}

    Args:
        dataset (Dataset): User dataset
        tokenizer (AutoTokenizer): Tokenizer used for formatting

    Returns:
        Callable: Formatting function if the dataset format is supported else None
    """
    if isinstance(dataset, Dataset):
        if "messages" in dataset.features:
            if dataset.features["messages"] == FORMAT_MAPPING["chatml"]:
                logging.info("Formatting dataset with chatml format")
                return conversations_formatting_function(tokenizer, "messages")
        if "conversations" in dataset.features:
            if dataset.features["conversations"] == FORMAT_MAPPING["chatml"]:
                logging.info("Formatting dataset with chatml format")
                return conversations_formatting_function(tokenizer, "conversations")
        elif dataset.features == FORMAT_MAPPING["instruction"]:
            logging.info("Formatting dataset with instruction format")
            return instructions_formatting_function(tokenizer)

    return None
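
# Illustrative rows matching the two supported schemas (field values are made-up examples):
#   chatml:      {"messages": [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]}
#   instruction: {"prompt": "Hi", "completion": "Hello!"}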
trl/trl/extras/dataset_formatting.py/0
{ "file_path": "trl/trl/extras/dataset_formatting.py", "repo_id": "trl", "token_count": 1400 }
424
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Literal, Optional

from transformers import TrainingArguments


@dataclass
class CPOConfig(TrainingArguments):
    r"""
    CPOConfig collects all training arguments related to the [`CPOTrainer`] class.

    Using [`HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        max_length (`int`, defaults to `None`):
            The maximum length of the sequences in the batch. This argument is required if you want to use the default data collator.
        max_prompt_length (`int`, defaults to `None`):
            The maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int`, defaults to `None`):
            The maximum length of the completion sequences.
        max_target_length (`int`, defaults to `None`):
            The maximum length of the target. This argument is required if you want to use the default data collator and your model is an encoder-decoder.
        beta (`float`, defaults to `0.1`):
            The beta factor in CPO loss.
        label_smoothing (`float`, defaults to `0`):
            The label smoothing factor to apply to the loss.
        loss_type (`str`, defaults to `"sigmoid"`):
            The type of loss to use: `"sigmoid"`, `"hinge"`, `"ipo"`, or `"simpo"`.
        label_pad_token_id (`int`, defaults to `-100`):
            The label pad token id. This argument is required if you want to use the default data collator.
        cpo_alpha (`float`, defaults to `1.0`):
            A hyperparameter that controls the strength of the BC regularizer in CPO training.
        simpo_gamma (`float`, defaults to `0.5`):
            A target reward margin for the SimPO loss, used only when `loss_type="simpo"`.
        padding_value (`int`, defaults to `None`):
            The padding value if it is different from the tokenizer's pad_token_id.
        truncation_mode (`str`, defaults to `"keep_end"`):
            The truncation mode to use, either `keep_end` or `keep_start`. This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, defaults to `False`):
            Whether to sample and log generations during the evaluation step.
        is_encoder_decoder (`Optional[bool]`, *optional*, defaults to `None`):
            If no model is provided, we need to know if the model_init returns an encoder-decoder.
        disable_dropout (`bool`, defaults to `True`):
            Whether or not to disable dropouts in `model`.
        model_init_kwargs (`Optional[Dict]`, *optional*):
            Dict of optional kwargs to pass when instantiating the model from a string.
        dataset_num_proc (`Optional[int]`, *optional*):
            The number of workers to use to tokenize the data. Defaults to `None`.
""" max_length: Optional[int] = None max_prompt_length: Optional[int] = None max_completion_length: Optional[int] = None max_target_length: Optional[int] = None beta: float = 0.1 label_smoothing: float = 0 loss_type: Literal["sigmoid", "hinge", "ipo", "simpo"] = "sigmoid" disable_dropout: bool = True cpo_alpha: float = 1.0 simpo_gamma: float = 0.5 label_pad_token_id: int = -100 padding_value: int = None truncation_mode: str = "keep_end" generate_during_eval: bool = False is_encoder_decoder: Optional[bool] = None model_init_kwargs: Optional[Dict] = None dataset_num_proc: Optional[int] = None def __post_init__(self): if self.loss_type == "kto_pair": raise ValueError("Support for kto_pair has been removed in CPOTrainer. Please use KTOTrainer.") return super().__post_init__()
trl/trl/trainer/cpo_config.py/0
{ "file_path": "trl/trl/trainer/cpo_config.py", "repo_id": "trl", "token_count": 1553 }
425
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math import os import time import typing import warnings from contextlib import nullcontext from typing import Callable, List, Optional, Union import datasets import numpy as np import torch import torch.nn.functional as F from accelerate import Accelerator from accelerate.utils import ProjectConfiguration, gather_object, is_deepspeed_available from datasets import Dataset from huggingface_hub import whoami from packaging import version from torch.optim import Adam from transformers import ( DataCollatorForLanguageModeling, PreTrainedTokenizer, PreTrainedTokenizerBase, PreTrainedTokenizerFast, ) from ..core import ( WANDB_PADDING, PPODecorators, clip_by_value, convert_to_scalar, entropy_from_logits, flatten_dict, logprobs_from_logits, masked_mean, masked_var, masked_whiten, set_seed, stack_dicts, stats_to_np, ) from ..import_utils import is_npu_available, is_torch_greater_2_0, is_xpu_available from ..models import ( SUPPORTED_ARCHITECTURES, PreTrainedModelWrapper, create_reference_model, unwrap_model_for_generation, ) from . import AdaptiveKLController, BaseTrainer, FixedKLController, PPOConfig, RunningMoments if is_deepspeed_available(): import deepspeed MODEL_CARD_TEMPLATE = """--- license: apache-2.0 library_name: transformers tags: - trl - ppo - transformers - reinforcement-learning --- # {model_name} This is a [TRL language model](https://github.com/huggingface/trl) that has been fine-tuned with reinforcement learning to guide the model outputs according to a value, function, or human feedback. The model can be used for text generation. ## Usage To use this model for inference, first install the TRL library: ```bash python -m pip install trl ``` You can then generate text as follows: ```python from transformers import pipeline generator = pipeline("text-generation", model="{model_id}") outputs = generator("Hello, my llama is cute") ``` If you want to use the model for training or to obtain the outputs from the value head, load the model as follows: ```python from transformers import AutoTokenizer from trl import AutoModelForCausalLMWithValueHead tokenizer = AutoTokenizer.from_pretrained("{model_id}") model = AutoModelForCausalLMWithValueHead.from_pretrained("{model_id}") inputs = tokenizer("Hello, my llama is cute", return_tensors="pt") outputs = model(**inputs, labels=inputs["input_ids"]) ``` """ class PPOTrainer(BaseTrainer): """ The PPOTrainer uses Proximal Policy Optimization to optimise language models. Note, this trainer is heavily inspired by the original OpenAI learning to summarize work here: https://github.com/openai/summarize-from-feedback Attributes: **config** (`PPOConfig`) -- Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more details. **model** (`PreTrainedModelWrapper`) -- Model to be optimized, Hugging Face transformer model with a value head. Check the documentation of `PreTrainedModelWrapper` for more details. 
        **ref_model** (`PreTrainedModelWrapper`, *optional*) -- Reference model to be used for KL penalty, Hugging Face
            transformer model with a causal language modelling head. Check the documentation of `PreTrainedModelWrapper`
            for more details. If no reference model is provided, the trainer will create a reference model with the same
            architecture as the model to be optimized with shared layers.
        **tokenizer** (`PreTrainedTokenizerBase`) -- Tokenizer to be used for encoding the
            data. Check the documentation of `transformers.PreTrainedTokenizer` and
            `transformers.PreTrainedTokenizerFast` for more details.
        **dataset** (Union[`torch.utils.data.Dataset`, `datasets.Dataset`], *optional*) -- PyTorch dataset or Hugging
            Face dataset. This is used to create a PyTorch dataloader. If no dataset is provided, the dataloader must be
            created outside the trainer; users need to design their own dataloader and make sure the batch
            size that is used is the same as the one specified in the configuration object.
        **optimizer** (`torch.optim.Optimizer`, *optional*) -- Optimizer to be used for training. If no optimizer is
            provided, the trainer will create an Adam optimizer with the learning rate specified in the configuration
            object.
        **data_collator** (DataCollatorForLanguageModeling, *optional*) -- Data collator to be used for training and
            passed along the dataloader.
        **num_shared_layers** (int, *optional*) -- Number of layers to be shared between the model and the reference
            model, if no reference model is passed. If no number is provided, all the layers will be shared.
        **lr_scheduler** (`torch.optim.lr_scheduler`, *optional*) -- Learning rate scheduler to be used for training.
    """

    _tag_names = ["trl", "ppo"]

    def __init__(
        self,
        config: Optional[PPOConfig] = None,
        model: Optional[PreTrainedModelWrapper] = None,
        ref_model: Optional[PreTrainedModelWrapper] = None,
        tokenizer: Optional[PreTrainedTokenizerBase] = None,
        dataset: Optional[Union[torch.utils.data.Dataset, Dataset]] = None,
        optimizer: Optional[torch.optim.Optimizer] = None,
        data_collator: Optional[typing.Callable] = None,
        num_shared_layers: Optional[int] = None,
        lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,
        training_data_collator: Optional[typing.Callable] = None,
    ):
        """
        Initialize PPOTrainer.

        Args:
            config (`PPOConfig`):
                Configuration object for PPOTrainer. Check the documentation of `PPOConfig` for more details.
            model (`PreTrainedModelWrapper`):
                Hugging Face transformer model with a value head.
            ref_model (`PreTrainedModelWrapper`):
                Hugging Face transformer model with a causal language modelling head. Used for the KL penalty.
            tokenizer (`transformers.PreTrainedTokenizerBase`):
                Hugging Face tokenizer.
            dataset (Optional[Union[`torch.utils.data.Dataset`, `datasets.Dataset`]]):
                PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
                will be preprocessed by removing the columns that are not used by the model. If none is passed,
                a warning will be raised in a multi-GPU setting.
            optimizer (`Optional[torch.optim.Optimizer]`):
                Optimizer used for training. If `None`, `Adam` is used as the default.
            data_collator (Optional[function]):
                Data collator function that is going to be used for the `prepare_dataloader` method. Note this collator
                is different from the one we use for training. Pass a valid `training_data_collator` instead.
            num_shared_layers (Optional[int]):
                Number of shared layers between the model and the reference model. If `None`, all layers are shared.
                Used only if `ref_model` is `None`.
lr_scheduler (`Optional[torch.optim.lr_scheduler]`): Learning rate scheduler used for training. training_data_collator (Optional[function]): Custom data collator used for training. """ super().__init__(config) # initial seed for reproducible experiments set_seed(config.seed) # Step 0: check positional arguments validity if not isinstance(config, PPOConfig): raise ValueError(f"config must be a PPOConfig, got {type(config)}") if not isinstance(tokenizer, (PreTrainedTokenizerBase)): raise ValueError( f"tokenizer must be a PreTrainedTokenizerBase like a PreTrainedTokenizer or a PreTrainedTokenizerFast, got {type(tokenizer)}" ) if not isinstance(model, (SUPPORTED_ARCHITECTURES)): raise ValueError( f"model must be a PreTrainedModelWrapper, got {type(model)} - supported architectures are: {SUPPORTED_ARCHITECTURES}" ) # Step 1: Initialize Accelerator self.accelerator = Accelerator( log_with=config.log_with, gradient_accumulation_steps=config.gradient_accumulation_steps, project_config=ProjectConfiguration(**config.project_kwargs), **config.accelerator_kwargs, ) # Step 1.1 Runtime variables filled by the accelerator config.world_size = self.accelerator.num_processes config.global_backward_batch_size = config.backward_batch_size * config.world_size config.global_batch_size = config.batch_size * config.world_size self.model = model self.model_params = filter(lambda p: p.requires_grad, self.model.parameters()) self.is_encoder_decoder = hasattr(self.model, "is_encoder_decoder") self.is_peft_model = getattr(self.model, "is_peft_model", False) config.is_encoder_decoder = self.is_encoder_decoder config.is_peft_model = self.is_peft_model is_using_tensorboard = config.log_with is not None and config.log_with == "tensorboard" self.accelerator.init_trackers( config.tracker_project_name, config=dict(trl_ppo_trainer_config=config.to_dict()) if not is_using_tensorboard else config.to_dict(), init_kwargs=config.tracker_kwargs, ) self.is_using_text_environment = getattr(config, "use_text_environment", False) if isinstance(ref_model, SUPPORTED_ARCHITECTURES): self.ref_model = ref_model if num_shared_layers is not None: warnings.warn( "num_shared_layers is ignored when ref_model is provided. Two different models are used for the " "model and the reference model and no layers are shared.", UserWarning, ) elif ref_model is None and not self.is_peft_model: self.ref_model = create_reference_model(self.model, num_shared_layers=num_shared_layers) elif self.is_peft_model: self.ref_model = None else: raise ValueError( f"ref_model must be a PreTrainedModelWrapper or `None`, got {type(ref_model)} - supported " f"architectures are: {SUPPORTED_ARCHITECTURES} " ) self.optional_peft_ctx = ( self.accelerator.unwrap_model(self.model).pretrained_model.disable_adapter if self.is_peft_model else nullcontext ) if not (isinstance(tokenizer, PreTrainedTokenizer) or isinstance(tokenizer, PreTrainedTokenizerFast)): raise ValueError( "tokenizer must be a transformers.PreTrainedTokenizer or transformers.PreTrainedTokenizerFast" ) self.tokenizer = tokenizer if dataset is not None and not (isinstance(dataset, torch.utils.data.Dataset) or isinstance(dataset, Dataset)): raise ValueError("dataset must be a torch.utils.data.Dataset or datasets.Dataset") elif dataset is None: warnings.warn( "No dataset is provided. 
Make sure to set config.batch_size to the correct value before training.", UserWarning, ) self.dataset = dataset self._signature_columns = None if self.dataset is not None: self.dataloader = self.prepare_dataloader(self.dataset, data_collator) elif self.dataset is None and self.accelerator.num_processes > 1: warnings.warn( "No dataset is provided. In a multi-GPU setting, this will lead to an error. You should" " prepare your dataloader yourself with `dataloader = ppo_trainer.accelerator.prepare(dataloader)`" " and using `torch.utils.data.DataLoader`, or pass a dataset to the `PPOTrainer`. Please " " refer to the documentation for more details.", UserWarning, ) self.dataloader = None else: self.dataloader = None # Step 3: Initialize optimizer and data collator if training_data_collator is None: self.data_collator = DataCollatorForLanguageModeling(self.tokenizer, mlm=False) else: self.data_collator = training_data_collator if optimizer is None: self.optimizer = Adam( filter(lambda p: p.requires_grad, self.model.parameters()), lr=self.config.learning_rate, ) else: self.optimizer = optimizer self.lr_scheduler = lr_scheduler if self.lr_scheduler is not None: lr_scheduler_class = ( torch.optim.lr_scheduler._LRScheduler if not is_torch_greater_2_0() else torch.optim.lr_scheduler.LRScheduler ) if not isinstance(self.lr_scheduler, lr_scheduler_class): raise ValueError( "lr_scheduler must be a torch.optim.lr_scheduler._LRScheduler or torch.optim.lr_scheduler.LRScheduler (for torch >= 2.0)" ) if self.config.adap_kl_ctrl: self.kl_ctl = AdaptiveKLController(self.config.init_kl_coef, self.config.target, self.config.horizon) else: self.kl_ctl = FixedKLController(self.config.init_kl_coef) # Safety checkers for DS integration is_deepspeed_used = self.accelerator.distributed_type == "DEEPSPEED" and hasattr( self.accelerator.state, "deepspeed_plugin" ) if config.gradient_checkpointing: self.model.gradient_checkpointing_enable() if hasattr(self.model, "enable_input_require_grads"): self.model.enable_input_require_grads() else: # For backward compatibility with older versions of transformers def make_inputs_require_grad(module, input, output): output.requires_grad_(True) self.model.pretrained_model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) ( self.model, self.optimizer, self.data_collator, self.dataloader, self.lr_scheduler, ) = self.accelerator.prepare( self.model, self.optimizer, self.data_collator, self.dataloader, self.lr_scheduler, ) if is_deepspeed_used: # Quantized models are already set on the correct device if not self.is_peft_model and not ( getattr(self.ref_model.pretrained_model, "is_loaded_in_8bit", False) or getattr(self.ref_model.pretrained_model, "is_loaded_in_4bit", False) ): self.ref_model = self._prepare_deepspeed(self.ref_model) else: self.ref_model = self.accelerator.prepare(self.ref_model) # In a distributed setup, only logging needs to be performed on the main process # check: https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html # or: https://discuss.pytorch.org/t/use-distributed-data-parallel-correctly/82500/11 self.is_distributed = self.accelerator.num_processes > 1 # init the current step self.current_step = 0 # init variables for pushing model to hub if config.push_to_hub_if_best_kwargs: if "repo_id" not in config.push_to_hub_if_best_kwargs: raise ValueError("You have to specify repo_id in order to push the model to the hub!") self.push_to_hub_kwargs = config.push_to_hub_if_best_kwargs self.compare_step = 0 
            self.highest_reward = torch.tensor(-float("inf"))

        # post process for PP
        if not getattr(self.model, "is_sequential_parallel", False):
            self.current_device = self.accelerator.device
        else:
            if is_xpu_available():
                self.current_device = torch.device("xpu:0")
            elif is_npu_available():
                self.current_device = torch.device("npu:0")
            else:
                self.current_device = torch.device("cuda:0")

        PPODecorators.optimize_device_cache = self.config.optimize_device_cache

        self.running = RunningMoments(self.accelerator)

    def _filter_kwargs(self, kwargs, target_func):
        """
        Filter the keyword arguments that are supported by the target function.

        Args:
            kwargs (dict):
                Keyword arguments
            target_func (function):
                Target function
        """
        return {k: v for k, v in kwargs.items() if k in inspect.signature(target_func).parameters.keys()}

    def prepare_dataloader(self, dataset: Union[torch.utils.data.Dataset, Dataset], data_collator=None):
        """
        Prepare the dataloader for training.

        Args:
            dataset (Union[`torch.utils.data.Dataset`, `datasets.Dataset`]):
                PyTorch dataset or Hugging Face dataset. If a Hugging Face dataset is passed, the dataset
                will be preprocessed by removing the columns that are not used by the model.
            data_collator (Optional[function]):
                Data collator function.

        Returns:
            `torch.utils.data.DataLoader`: PyTorch dataloader
        """
        if isinstance(dataset, Dataset):
            dataset = self._remove_unused_columns(dataset)
        dataloader = torch.utils.data.DataLoader(
            dataset,
            batch_size=self.config.batch_size,
            collate_fn=data_collator,
            shuffle=True,
            drop_last=True,
        )
        return dataloader

    # Adapted from transformers.Trainer._set_signature_columns_if_needed
    def _set_signature_columns_if_needed(self):
        if self._signature_columns is None:
            # Inspect model forward signature to keep only the arguments it accepts.
            signature = inspect.signature(self.model.forward)
            self._signature_columns = list(signature.parameters.keys())
            # label => sentiment | we need query and response for logging purposes
            self._signature_columns += ["label", "query", "response"]

    # Adapted from transformers.Trainer._remove_unused_columns
    def _remove_unused_columns(self, dataset: "Dataset"):
        if not self.config.remove_unused_columns:
            return dataset
        self._set_signature_columns_if_needed()
        signature_columns = self._signature_columns

        ignored_columns = list(set(dataset.column_names) - set(signature_columns))

        columns = [k for k in signature_columns if k in dataset.column_names]

        if version.parse(datasets.__version__) < version.parse("1.4.0"):
            dataset.set_format(
                type=dataset.format["type"],
                columns=columns,
                format_kwargs=dataset.format["format_kwargs"],
            )
            return dataset
        else:
            return dataset.remove_columns(ignored_columns)

    def generate(
        self,
        query_tensor: Union[torch.Tensor, List[torch.Tensor]],
        length_sampler: Optional[Callable] = None,
        batch_size: int = 4,
        return_prompt: bool = True,
        generate_ref_response: bool = False,
        **generation_kwargs,
    ):
        """
        Generate a response with the model given the query tensor by calling the `generate` method of the model.

        Args:
            query_tensor (`torch.LongTensor`):
                A tensor of shape (`seq_len`) containing query tokens or a list of tensors of shape (`seq_len`).
            length_sampler (`Callable`, *optional*):
                Callable that returns the number of newly generated tokens.
            batch_size (`int`, *optional*):
                Batch size used for generation, defaults to `4`.
            return_prompt (`bool`, *optional*):
                If set to `False` the prompt is not returned but only the newly generated tokens, defaults to `True`.
generate_ref_response (`bool`, *optional*): If set to `True` the reference response is also generated, defaults to `False`. generation_kwargs (dict[str, Any]): Keyword arguments for generation. Returns: `torch.LongTensor`: A tensor of shape (`batch_size`, `gen_len`) containing response tokens. """ if generate_ref_response: ref_model = self.model if self.is_peft_model else self.ref_model if isinstance(query_tensor, List): response = self._generate_batched( self.model, query_tensor, length_sampler=length_sampler, batch_size=batch_size, return_prompt=return_prompt, **generation_kwargs, ) if generate_ref_response: ref_response = self._generate_batched( ref_model, query_tensor, length_sampler=length_sampler, batch_size=batch_size, return_prompt=return_prompt, **generation_kwargs, ) else: if len(query_tensor.shape) == 2: raise ValueError( "query_tensor must be a tensor of shape (`seq_len`) or a list of tensors of shape (`seq_len`)" ) if length_sampler is not None: generation_kwargs["max_new_tokens"] = length_sampler() with unwrap_model_for_generation(self.model, self.accelerator) as unwrapped_model: response = unwrapped_model.generate(input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs) if generate_ref_response: with unwrap_model_for_generation( ref_model, self.accelerator, is_peft_model=self.is_peft_model ) as unwrapped_model: ref_response = unwrapped_model.generate( input_ids=query_tensor.unsqueeze(dim=0), **generation_kwargs ) if not return_prompt and not self.is_encoder_decoder: response = response[:, query_tensor.shape[0] :] if generate_ref_response: ref_response = ref_response[:, query_tensor.shape[0] :] if generate_ref_response: return response, ref_response return response def _generate_batched( self, model: PreTrainedModelWrapper, query_tensors: List[torch.Tensor], length_sampler: Optional[Callable] = None, batch_size: int = 4, return_prompt: bool = True, pad_to_multiple_of: Optional[int] = None, remove_padding: bool = True, **generation_kwargs, ): outputs = [] padding_side_default = self.tokenizer.padding_side if not self.is_encoder_decoder: self.tokenizer.padding_side = "left" # in case we have fewer examples than bs batch_size = min(len(query_tensors), batch_size) for i in range(0, len(query_tensors), batch_size): if length_sampler is not None: generation_kwargs["max_new_tokens"] = length_sampler() # prevent overflow if query tensors are not even multiple of bs end_index = min(len(query_tensors), i + batch_size) batch = query_tensors[i:end_index] batch_mask = [torch.ones_like(element) for element in batch] inputs = {"input_ids": batch, "attention_mask": batch_mask} padded_inputs = self.tokenizer.pad( inputs, padding=True, max_length=None, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", ).to(self.current_device) with unwrap_model_for_generation(model, self.accelerator) as unwrapped_model: generations = unwrapped_model.generate(**padded_inputs, **generation_kwargs) for generation, mask in zip(generations, padded_inputs["attention_mask"]): if not self.is_encoder_decoder: output = generation[(1 - mask).sum() :] # remove padding else: output = generation if not return_prompt and not self.is_encoder_decoder: output = output[(mask).sum() :] # remove prompt if remove_padding and self.tokenizer.eos_token_id in output: pad_mask = output == self.tokenizer.eos_token_id pad_start = torch.nonzero(pad_mask, as_tuple=False)[0, 0].item() output = output[: pad_start + 1] # keep the eos token at the end outputs.append(output) self.tokenizer.padding_side = padding_side_default 
        return outputs

    def _step_safety_checker(
        self,
        batch_size: int,
        queries: List[torch.LongTensor],
        responses: List[torch.LongTensor],
        scores: List[torch.FloatTensor],
        masks: Optional[List[torch.LongTensor]] = None,
    ):
        """
        Check if the input data is valid for training.

        Args:
            batch_size (int):
                Batch size from the config file.
            queries (List[`torch.LongTensor`]):
                List of tensors containing the encoded queries of shape (`query_length`)
            responses (List[`torch.LongTensor`]):
                List of tensors containing the encoded responses of shape (`response_length`)
            scores (List[`torch.FloatTensor`]):
                List of tensors containing the scores.
            masks (List[`torch.LongTensor`], *optional*):
                List of optional tensors containing the masks of shape (`response_length`)
        Returns:
            `tuple`: The input processed data.
        """
        for name, tensor_list in zip(["queries", "responses", "scores"], [queries, responses, scores]):
            if not isinstance(tensor_list, list):
                raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}")
            if not isinstance(tensor_list[0], torch.Tensor):
                raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}")
            if batch_size is not None and len(tensor_list) != batch_size:
                raise ValueError(
                    f"Batch size ({batch_size}) does not match number of examples - but got {len(tensor_list)} for: {name}"
                )

        # move queries, scores and responses to the correct device
        queries = [tensor.to(self.current_device) for tensor in queries]
        responses = [tensor.to(self.current_device) for tensor in responses]
        scores = [tensor.to(self.current_device) for tensor in scores]
        masks = [tensor.to(self.current_device) for tensor in masks] if masks is not None else None

        # squeeze scores if needed
        for i, score in enumerate(scores):
            if score.dim() > 1:
                raise ValueError(f"Scores must be 1-dimensional - got {score.dim()} for {score}")
            elif score.dim() == 1:
                scores[i] = score.squeeze()

        return queries, responses, scores, masks

    @PPODecorators.empty_device_cache()
    def step(
        self,
        queries: List[torch.LongTensor],
        responses: List[torch.LongTensor],
        scores: List[torch.FloatTensor],
        response_masks: Optional[List[torch.LongTensor]] = None,
    ):
        """
        Run a PPO optimisation step given a list of queries, model responses, and rewards.

        Args:
            queries (List[`torch.LongTensor`]):
                List of tensors containing the encoded queries of shape (`query_length`)
            responses (List[`torch.LongTensor`]):
                List of tensors containing the encoded responses of shape (`response_length`)
            scores (List[`torch.FloatTensor`]):
                List of tensors containing the scores.
            response_masks (List[`torch.LongTensor`], *optional*):
                List of tensors containing masks of the response tokens.
Returns: `dict[str, Any]`: A summary of the training statistics """ bs = self.config.batch_size queries, responses, scores, response_masks = self._step_safety_checker( bs, queries, responses, scores, response_masks ) scores = torch.tensor(scores, device=self.current_device) if self.config.use_score_scaling: # Score scaling scores_mean, scores_std = self.running.update(scores) tensor_to_kwargs = dict(dtype=scores.dtype, device=scores.device) score_scaling_factor = self.running.std.to(**tensor_to_kwargs) + torch.finfo(scores.dtype).eps if self.config.use_score_norm: scores = (scores - self.running.mean.to(**tensor_to_kwargs)) / score_scaling_factor else: scores /= score_scaling_factor if self.config.score_clip is not None: # Score clipping scores_dtype = scores.dtype scores = torch.clip(scores.float(), -self.config.score_clip, self.config.score_clip).to(dtype=scores_dtype) # if we want to push best model to the hub if hasattr(self, "highest_reward"): if self.compare_step % self.config.compare_steps == 0: curr_mean_reward = scores.mean() # if the best reward ever seen if curr_mean_reward > self.highest_reward: self.highest_reward = curr_mean_reward # push model to hub self.push_to_hub(**self.push_to_hub_kwargs) self.compare_step += 1 timing = dict() t0 = time.time() t = time.time() model_inputs = self.prepare_model_inputs(queries, responses) if self.is_distributed: pad_first = self.tokenizer.padding_side == "left" model_inputs["input_ids"] = self.accelerator.pad_across_processes( model_inputs["input_ids"], dim=1, pad_index=self.tokenizer.pad_token_id, pad_first=pad_first, ) model_inputs["attention_mask"] = self.accelerator.pad_across_processes( model_inputs["attention_mask"], dim=1, pad_index=0, pad_first=pad_first ) if self.is_encoder_decoder: model_inputs["decoder_input_ids"] = self.accelerator.pad_across_processes( model_inputs["decoder_input_ids"], dim=1, pad_index=self.tokenizer.pad_token_id, pad_first=pad_first, ) model_inputs["decoder_attention_mask"] = self.accelerator.pad_across_processes( model_inputs["decoder_attention_mask"], dim=1, pad_index=0, pad_first=pad_first, ) model_inputs_names = list(model_inputs.keys()) full_kl_penalty = self.config.kl_penalty == "full" with torch.no_grad(): all_logprobs, logits_or_none, values, masks = self.batched_forward_pass( self.model, queries, responses, model_inputs, response_masks=response_masks, return_logits=full_kl_penalty, ) with self.optional_peft_ctx(): ref_logprobs, ref_logits_or_none, _, _ = self.batched_forward_pass( self.model if self.is_peft_model else self.ref_model, queries, responses, model_inputs, return_logits=full_kl_penalty, ) timing["time/ppo/forward_pass"] = time.time() - t with torch.no_grad(): t = time.time() if full_kl_penalty: active_full_logprobs = logprobs_from_logits(logits_or_none, None, gather=False) ref_full_logprobs = logprobs_from_logits(ref_logits_or_none, None, gather=False) rewards, non_score_reward, kls = self.compute_rewards( scores, active_full_logprobs, ref_full_logprobs, masks ) else: rewards, non_score_reward, kls = self.compute_rewards(scores, all_logprobs, ref_logprobs, masks) timing["time/ppo/compute_rewards"] = time.time() - t t = time.time() values, advantages, returns = self.compute_advantages(values, rewards, masks) timing["time/ppo/compute_advantages"] = time.time() - t # upcast to float32 to avoid dataset issues batch_dict = { "queries": queries, "responses": responses, "logprobs": all_logprobs.to(torch.float32), "values": values.to(torch.float32), "masks": masks, "advantages": advantages, 
"returns": returns, } batch_dict.update(model_inputs) t = time.time() all_stats = [] early_stop = False for _ in range(self.config.ppo_epochs): if early_stop: break b_inds = np.random.permutation(bs) for backward_batch_start in range(0, bs, self.config.backward_batch_size): backward_batch_end = backward_batch_start + self.config.backward_batch_size backward_batch_inds = b_inds[backward_batch_start:backward_batch_end] for mini_batch_start in range(0, self.config.backward_batch_size, self.config.mini_batch_size): mini_batch_end = mini_batch_start + self.config.mini_batch_size mini_batch_inds = backward_batch_inds[mini_batch_start:mini_batch_end] mini_batch_dict = { "logprobs": batch_dict["logprobs"][mini_batch_inds], "values": batch_dict["values"][mini_batch_inds], "masks": batch_dict["masks"][mini_batch_inds], # hacks: the queries and responses are ragged. "queries": [batch_dict["queries"][i] for i in mini_batch_inds], "responses": [batch_dict["responses"][i] for i in mini_batch_inds], "advantages": batch_dict["advantages"][mini_batch_inds], "returns": batch_dict["returns"][mini_batch_inds], } for k in model_inputs_names: mini_batch_dict[k] = batch_dict[k][mini_batch_inds] with self.accelerator.accumulate(self.model): model_inputs = {k: mini_batch_dict[k] for k in model_inputs_names} logprobs, logits, vpreds, _ = self.batched_forward_pass( self.model, mini_batch_dict["queries"], mini_batch_dict["responses"], model_inputs, return_logits=True, ) train_stats = self.train_minibatch( mini_batch_dict["logprobs"], mini_batch_dict["values"], logprobs, logits, vpreds, mini_batch_dict["masks"], mini_batch_dict["advantages"], mini_batch_dict["returns"], ) all_stats.append(train_stats) # typically, early stopping is done at the epoch level if self.config.early_stopping: policykl = train_stats["policy/policykl"] early_stop = self._early_stop(policykl) if early_stop: break timing["time/ppo/optimize_step"] = time.time() - t t = time.time() train_stats = stack_dicts(all_stats) # reshape advantages/ratios such that they are not averaged. train_stats["policy/advantages"] = torch.flatten(train_stats["policy/advantages"]).unsqueeze(0) train_stats["policy/advantages"] = torch.nan_to_num(train_stats["policy/advantages"], WANDB_PADDING) train_stats["policy/ratio"] = torch.flatten(train_stats["policy/ratio"]).unsqueeze(0) stats = self.record_step_stats( scores=scores, logprobs=all_logprobs, ref_logprobs=ref_logprobs, non_score_reward=non_score_reward, train_stats=train_stats, kl_coef=self.kl_ctl.value, masks=masks, queries=queries, responses=responses, kls=kls, ) # Gather/Reduce stats from all processes if self.is_distributed: stats = self.gather_stats(stats) stats = stats_to_np(stats) timing["time/ppo/calc_stats"] = time.time() - t stats["ppo/learning_rate"] = self.optimizer.param_groups[0]["lr"] # Update the KL control - multiply the batch_size by the number of processes self.kl_ctl.update( stats["objective/kl"], self.config.batch_size * self.accelerator.num_processes, ) # Log the total ppo time timing["time/ppo/total"] = time.time() - t0 stats.update(timing) # post-process stats for tensorboard and other loggers if self.config.log_with != "wandb": stats = convert_to_scalar(stats) if self.lr_scheduler is not None: self.lr_scheduler.step() return stats def _early_stop(self, policykl): r""" Handles the early stopping logic. If the policy KL is greater than the target KL, then the gradient is zeroed and the optimization step is skipped. 
This also handles the multi-gpu case where the policy KL is averaged across all processes. Args: policy_kl (torch.Tensor): the policy KL Returns: `bool`: whether to early stop or not """ early_stop = False if not self.config.early_stopping: return early_stop if not self.is_distributed and policykl > 1.5 * self.config.target_kl: self.optimizer.zero_grad() early_stop = True elif self.is_distributed: import torch.distributed as dist # Wait for all processes to finish dist.barrier() # all gather the policykl dist.all_reduce(policykl, dist.ReduceOp.SUM) policykl /= self.accelerator.num_processes if policykl > 1.5 * self.config.target_kl: self.optimizer.zero_grad() early_stop = True return early_stop def gather_stats(self, stats): """ Gather stats from all processes. Useful in the context of distributed training. Args: stats (dict[str, Any]): a dictionary of stats to be gathered. The stats should contain torch tensors. Returns: `dict[str, Any]`: A dictionary of stats with the tensors gathered. """ import torch.distributed as dist # Wait for all processes to finish dist.barrier() for k, v in stats.items(): if isinstance(v, torch.Tensor): dist.all_reduce(v.to(self.accelerator.device), dist.ReduceOp.SUM) v /= self.accelerator.num_processes stats[k] = v return stats def prepare_model_inputs(self, queries: torch.Tensor, responses: torch.Tensor): if self.is_encoder_decoder: input_data = self.data_collator( [{"input_ids": q, "attention_mask": torch.ones_like(q)} for q in queries] ).to(self.current_device) decoder_inputs = self.data_collator( [{"input_ids": r, "attention_mask": torch.ones_like(r)} for r in responses] ).to(self.current_device) input_data["decoder_input_ids"] = decoder_inputs["input_ids"] input_data["decoder_attention_mask"] = decoder_inputs["attention_mask"] else: input_ids = [torch.cat([q, r]) for q, r in zip(queries, responses)] input_data = self.data_collator( [{"input_ids": ids, "attention_mask": torch.ones_like(ids)} for ids in input_ids] ).to(self.current_device) input_data.pop("labels", None) # we don't want to compute LM losses return input_data @PPODecorators.empty_device_cache() def batched_forward_pass( self, model: PreTrainedModelWrapper, queries: torch.Tensor, responses: torch.Tensor, model_inputs: dict, return_logits: bool = False, response_masks: Optional[torch.Tensor] = None, ): """ Calculate model outputs in multiple batches. Args: queries (`torch.LongTensor`): List of tensors containing the encoded queries, shape (`batch_size`, `query_length`) responses (`torch.LongTensor`): List of tensors containing the encoded responses, shape (`batch_size`, `response_length`) return_logits (`bool`, *optional*, defaults to `False`): Whether to return all_logits. Set to `False` if logits are not needed to reduce memory consumption. 
Returns: (tuple): - all_logprobs (`torch.FloatTensor`): Log probabilities of the responses, shape (`batch_size`, `response_length`) - all_ref_logprobs (`torch.FloatTensor`): Log probabilities of the responses, shape (`batch_size`, `response_length`) - all_values (`torch.FloatTensor`): Values of the responses, shape (`batch_size`, `response_length`) """ bs = len(queries) fbs = self.config.mini_batch_size all_logprobs = [] all_logits = [] all_masks = [] all_values = [] model.eval() for i in range(math.ceil(bs / fbs)): input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()} query_batch = queries[i * fbs : (i + 1) * fbs] response_batch = responses[i * fbs : (i + 1) * fbs] if response_masks is not None: response_masks_batch = response_masks[i * fbs : (i + 1) * fbs] logits, _, values = model(**input_kwargs) if self.is_encoder_decoder: input_ids = input_kwargs["decoder_input_ids"] attention_mask = input_kwargs["decoder_attention_mask"] else: input_ids = input_kwargs["input_ids"] attention_mask = input_kwargs["attention_mask"] logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:]) masks = torch.zeros_like(attention_mask) masks[:, :-1] = attention_mask[:, 1:] for j in range(len(query_batch)): if self.is_encoder_decoder: # Decoder sentence starts always in the index 1 after padding in the Enc-Dec Models start = 1 end = attention_mask[j, :].sum() - 1 else: start = len(query_batch[j]) - 1 # logprobs starts from the second query token if attention_mask[j, 0] == 0: # offset left padding start += attention_mask[j, :].nonzero()[0] end = start + len(response_batch[j]) masks[j, :start] = 0 masks[j, end:] = 0 if response_masks is not None: masks[j, start:end] = masks[j, start:end] * response_masks_batch[j] if return_logits: all_logits.append(logits) else: del logits all_values.append(values) all_logprobs.append(logprobs) all_masks.append(masks) return ( torch.cat(all_logprobs), torch.cat(all_logits)[:, :-1] if return_logits else None, torch.cat(all_values)[:, :-1], torch.cat(all_masks)[:, :-1], ) @PPODecorators.empty_device_cache() def train_minibatch( self, old_logprobs: torch.FloatTensor, values: torch.FloatTensor, logprobs: torch.FloatTensor, logits: torch.FloatTensor, vpreds: torch.FloatTensor, mask: torch.LongTensor, advantages: torch.FloatTensor, returns: torch.FloatTensor, ): """ Train one PPO minibatch Args: logprobs (`torch.FloatTensor`): Log probabilities of the model, shape [mini_batch_size, response_length] values (`torch.FloatTensor`): Values of the value head, shape [mini_batch_size, response_length] query (`torch.LongTensor`): Encoded queries, shape [mini_batch_size, query_length] response (`torch.LongTensor`): Encoded responses, shape [mini_batch_size, response_length] model_input (`torch.LongTensor`): Concatenated queries and responses, shape [mini_batch_size, query_length+response_length] Returns: train_stats (dict[str, `torch.Tensor`]): Dictionary of training statistics """ self.model.train() loss_p, loss_v, train_stats = self.loss( old_logprobs, values, logits, vpreds, logprobs, mask, advantages, returns ) loss = loss_p + loss_v self.accelerator.backward(loss) if self.config.max_grad_norm is not None: if self.accelerator.sync_gradients: self.accelerator.clip_grad_norm_(self.model_params, self.config.max_grad_norm) self.optimizer.step() # we call optimizer.zero_grad() every time and let `accelerator` handle accumulation # see https://huggingface.co/docs/accelerate/usage_guides/gradient_accumulation#the-finished-code 
self.optimizer.zero_grad() return train_stats def compute_rewards( self, scores: torch.FloatTensor, logprobs: torch.FloatTensor, ref_logprobs: torch.FloatTensor, masks: torch.LongTensor, ): """ Compute per token rewards from scores and KL-penalty. Args: scores (`torch.FloatTensor`): Scores from the reward model, shape (`batch_size`) logprobs (`torch.FloatTensor`): Log probabilities of the model, shape (`batch_size`, `response_length`) ref_logprobs (`torch.FloatTensor`): Log probabilities of the reference model, shape (`batch_size`, `response_length`) Returns: `torch.FloatTensor`: Per token rewards, shape (`batch_size`, `response_length`) `torch.FloatTensor`: Non score rewards, shape (`batch_size`, `response_length`) `torch.FloatTensor`: KL penalty, shape (`batch_size`, `response_length`) """ rewards, non_score_rewards, kls = [], [], [] for score, logprob, ref_logprob, mask in zip(scores, logprobs, ref_logprobs, masks): # compute KL penalty (from difference in logprobs) kl = self._kl_penalty(logprob, ref_logprob) kls.append(kl) non_score_reward = -self.kl_ctl.value * kl non_score_rewards.append(non_score_reward) reward = non_score_reward.clone() last_non_masked_index = mask.nonzero()[-1] # reward is preference model score + KL penalty reward[last_non_masked_index] += score rewards.append(reward) return torch.stack(rewards), torch.stack(non_score_rewards), torch.stack(kls) def _kl_penalty(self, logprob: torch.FloatTensor, ref_logprob: torch.FloatTensor) -> torch.FloatTensor: if self.config.kl_penalty == "kl": return logprob - ref_logprob if self.config.kl_penalty == "abs": return (logprob - ref_logprob).abs() if self.config.kl_penalty == "mse": return 0.5 * (logprob - ref_logprob).square() if self.config.kl_penalty == "full": # Flip is required due to this issue? :https://github.com/pytorch/pytorch/issues/57459 return F.kl_div(ref_logprob, logprob, log_target=True, reduction="none").sum(-1) raise NotImplementedError def compute_advantages( self, values: torch.FloatTensor, rewards: torch.FloatTensor, mask: torch.FloatTensor, ): lastgaelam = 0 advantages_reversed = [] gen_len = rewards.shape[-1] values = values * mask rewards = rewards * mask if self.config.whiten_rewards: rewards = masked_whiten(rewards, mask, shift_mean=False) for t in reversed(range(gen_len)): nextvalues = values[:, t + 1] if t < gen_len - 1 else 0.0 delta = rewards[:, t] + self.config.gamma * nextvalues - values[:, t] lastgaelam = delta + self.config.gamma * self.config.lam * lastgaelam advantages_reversed.append(lastgaelam) advantages = torch.stack(advantages_reversed[::-1]).transpose(0, 1) returns = advantages + values advantages = masked_whiten(advantages, mask) advantages = advantages.detach() return values, advantages, returns def loss( self, old_logprobs: torch.FloatTensor, values: torch.FloatTensor, logits: torch.FloatTensor, vpreds: torch.FloatTensor, logprobs: torch.FloatTensor, mask: torch.LongTensor, advantages: torch.FloatTensor, returns: torch.FloatTensor, ): """ Calculate policy and value losses. 
Args: old_logprobs (`torch.FloatTensor`): Log probabilities of the model, shape (`batch_size`, `response_length`) values (`torch.FloatTensor`): Values of the value head, shape (`batch_size`, `response_length`) rewards (`torch.FloatTensor`): Rewards from the reward model, shape (`batch_size`, `response_length`) logits (`torch.FloatTensor`): Logits of the model, shape (`batch_size`, `response_length`, `vocab_size`) v_pred (`torch.FloatTensor`): Values of the value head, shape (`batch_size`, `response_length`) logprobs (`torch.FloatTensor`): Log probabilities of the model, shape (`batch_size`, `response_length`) """ vpredclipped = clip_by_value( vpreds, values - self.config.cliprange_value, values + self.config.cliprange_value, ) vf_losses1 = (vpreds - returns) ** 2 vf_losses2 = (vpredclipped - returns) ** 2 vf_loss = 0.5 * masked_mean(torch.max(vf_losses1, vf_losses2), mask) vf_clipfrac = masked_mean(torch.gt(vf_losses2, vf_losses1).float(), mask) ratio = torch.exp(logprobs - old_logprobs) pg_losses = -advantages * ratio pg_losses2 = -advantages * torch.clamp(ratio, 1.0 - self.config.cliprange, 1.0 + self.config.cliprange) pg_loss = masked_mean(torch.max(pg_losses, pg_losses2), mask) pg_clipfrac = masked_mean(torch.gt(pg_losses2, pg_losses).float(), mask) loss = pg_loss + self.config.vf_coef * vf_loss avg_ratio = masked_mean(ratio, mask).item() if avg_ratio > self.config.ratio_threshold: warnings.warn( f"The average ratio of batch ({avg_ratio:.2f}) exceeds threshold {self.config.ratio_threshold:.2f}. Skipping batch." ) pg_loss = pg_loss * 0.0 vf_loss = vf_loss * 0.0 loss = loss * 0.0 entropy = masked_mean(entropy_from_logits(logits), mask) approxkl = 0.5 * masked_mean((logprobs - old_logprobs) ** 2, mask) policykl = masked_mean(old_logprobs - logprobs, mask) return_mean, return_var = masked_mean(returns, mask), masked_var(returns, mask) value_mean, value_var = masked_mean(values, mask), masked_var(values, mask) stats = dict( loss=dict(policy=pg_loss.detach(), value=vf_loss.detach(), total=loss.detach()), policy=dict( entropy=entropy.detach(), approxkl=approxkl.detach(), policykl=policykl.detach(), clipfrac=pg_clipfrac.detach(), advantages=advantages.detach(), advantages_mean=masked_mean(advantages, mask).detach(), ratio=ratio.detach(), ), returns=dict(mean=return_mean.detach(), var=return_var.detach()), val=dict( vpred=masked_mean(vpreds, mask).detach(), error=masked_mean((vpreds - returns) ** 2, mask).detach(), clipfrac=vf_clipfrac.detach(), mean=value_mean.detach(), var=value_var.detach(), ), ) return pg_loss, self.config.vf_coef * vf_loss, flatten_dict(stats) def record_step_stats(self, kl_coef: float, **data): """ Record training step statistics. Args: kl_coef (`float`): KL coefficient data (`dict`): Dictionary of training step data Returns: stats (`dict`): Dictionary of training step statistics """ mask = data.pop("masks") kls = data.pop("kls") kl_list = ((kls) * mask).sum(axis=-1) mean_kl = kl_list.mean() mean_entropy = (-data["logprobs"] * mask).sum(axis=-1).mean() mean_non_score_reward = masked_mean( data["non_score_reward"], mask ) # non_score_reward is size `batch_size`, `response_length` mean_scores = data["scores"].mean() # scores is size `batch_size` std_scores = data["scores"].std() if mean_kl.item() < -1.0: # warn users warnings.warn( f"KL divergence is starting to become negative: {mean_kl.item():.2f} - this might be a precursor for failed training." " sometimes this happens because the generation kwargs are not correctly set. 
Please make sure" " that the generation kwargs are set correctly, or review your training hyperparameters." ) stats = { "objective/kl": mean_kl, "objective/kl_dist": kl_list, "objective/logprobs": data["logprobs"], "objective/ref_logprobs": data["ref_logprobs"], "objective/kl_coef": kl_coef, "objective/entropy": mean_entropy, "ppo/mean_non_score_reward": mean_non_score_reward, "ppo/mean_scores": mean_scores, "ppo/std_scores": std_scores, } # Log text properties query_lens = torch.tensor([len(query) for query in data["queries"]], dtype=torch.float) response_lens = torch.tensor([len(response) for response in data["responses"]], dtype=torch.float) stats["tokens/queries_len_mean"] = torch.mean(query_lens).cpu().numpy().item() stats["tokens/queries_len_std"] = torch.std(query_lens).cpu().numpy().item() stats["tokens/queries_dist"] = query_lens.cpu().numpy() stats["tokens/responses_len_mean"] = torch.mean(response_lens).cpu().numpy().item() stats["tokens/responses_len_std"] = torch.std(response_lens).cpu().numpy().item() stats["tokens/responses_dist"] = response_lens.cpu().numpy() for k, v in data["train_stats"].items(): stats[f"ppo/{k}"] = torch.mean(v, axis=0) stats["ppo/val/var_explained"] = 1 - stats["ppo/val/error"] / stats["ppo/returns/var"] return stats def log_stats( self, stats: dict, batch: dict, rewards: List[torch.FloatTensor], columns_to_log: typing.Iterable[str] = ("query", "response"), ): """ A function that logs all the training stats. Call it at the end of each epoch. Args: stats (dict[str, Any]): A dictionary of training stats. batch (dict[str, Any]): A dictionary of batch data, this contains the queries and responses. rewards (`List[torch.FloatTensor]`): A tensor of rewards. """ # all gather stats if not isinstance(rewards, torch.Tensor): rewards = torch.tensor(rewards).to(self.current_device) rewards = self.accelerator.gather(rewards).flatten() if self.config.log_with == "wandb": import wandb if any(column_to_log not in batch.keys() for column_to_log in columns_to_log): raise ValueError(f"Columns to log {columns_to_log} are not present in the batch {batch.keys()}.") batch_list = [batch[column_to_log] for column_to_log in columns_to_log] if self.is_distributed: gathered_batch_list = [] for b in batch_list: flattened = gather_object(b) gathered_batch_list.append(flattened) batch_list = gathered_batch_list # Log only if we are in the main process if self.accelerator.is_main_process: logs = {} # Log stats if "query" not in batch.keys() and "response" not in batch.keys(): # warn the user that the game logs will not be logged warnings.warn( "The game logs will not be logged because the batch does not contain the keys 'query' and " "'response'. 
" ) elif self.config.log_with == "wandb": table_rows = [list(r) for r in zip(*batch_list, rewards.cpu().tolist())] logs.update({"game_log": wandb.Table(columns=[*columns_to_log, "reward"], rows=table_rows)}) logs.update(stats) # manually cast in fp32 for bf16 torch tensors for k, v in logs.items(): if isinstance(v, torch.Tensor) and v.dtype == torch.bfloat16: logs[k] = v.float() logs["env/reward_mean"] = torch.mean(rewards).cpu().numpy().item() logs["env/reward_std"] = torch.std(rewards).cpu().numpy().item() logs["env/reward_dist"] = rewards.cpu().numpy() if self.config.log_with == "tensorboard": # update the current step self.current_step += 1 self.accelerator.log( logs, step=self.current_step if self.config.log_with == "tensorboard" else None, ) def create_model_card(self, path: str, model_name: Optional[str] = "TRL Model") -> None: """Creates and saves a model card for a TRL model. Args: path (`str`): The path to save the model card to. model_name (`str`, *optional*): The name of the model, defaults to `TRL Model`. """ try: user = whoami()["name"] # handle the offline case except Exception: warnings.warn("Cannot retrieve user information assuming you are running in offline mode.") return if not os.path.exists(path): os.makedirs(path) model_card_content = MODEL_CARD_TEMPLATE.format(model_name=model_name, model_id=f"{user}/{path}") with open(os.path.join(path, "README.md"), "w", encoding="utf-8") as f: f.write(model_card_content) def _save_pretrained(self, save_directory: str) -> None: self.accelerator.unwrap_model(self.model).save_pretrained(save_directory) self.tokenizer.save_pretrained(save_directory) self.create_model_card(save_directory) def _show_tokens(self, tokens, masks): from rich import print from rich.text import Text text = Text() for _i, (token, mask) in enumerate(zip(tokens, masks)): if mask == 1: text.append(self.tokenizer.decode(token.item()), style="black on deep_sky_blue1") text.append(" ") else: text.append(self.tokenizer.decode(token.item()), style="black on cyan3") text.append(" ") print(text) def _prepare_deepspeed(self, model: PreTrainedModelWrapper): # Adapted from accelerate: https://github.com/huggingface/accelerate/blob/739b135f8367becb67ffaada12fe76e3aa60fefd/src/accelerate/accelerator.py#L1473 deepspeed_plugin = self.accelerator.state.deepspeed_plugin config_kwargs = deepspeed_plugin.deepspeed_config if model is not None: if hasattr(model, "config"): hidden_size = ( max(model.config.hidden_sizes) if getattr(model.config, "hidden_sizes", None) else getattr(model.config, "hidden_size", None) ) if hidden_size is not None and config_kwargs["zero_optimization"]["stage"] == 3: # Note that `stage3_prefetch_bucket_size` can produce DeepSpeed messages like: `Invalidate trace cache @ step 0: expected module 1, but got module 0` # This is expected and is not an error, see: https://github.com/microsoft/DeepSpeed/discussions/4081 config_kwargs.update( { "zero_optimization.reduce_bucket_size": hidden_size * hidden_size, "zero_optimization.stage3_param_persistence_threshold": 10 * hidden_size, "zero_optimization.stage3_prefetch_bucket_size": 0.9 * hidden_size * hidden_size, } ) # If ZeRO-3 is used, we shard both the active and reference model. # Otherwise, we assume the reference model fits in memory and is initialized on each device with ZeRO disabled (stage 0) if config_kwargs["zero_optimization"]["stage"] != 3: config_kwargs["zero_optimization"]["stage"] = 0 model, *_ = deepspeed.initialize(model=model, config=config_kwargs) model.eval() return model
trl/trl/trainer/ppo_trainer.py/0
{ "file_path": "trl/trl/trainer/ppo_trainer.py", "repo_id": "trl", "token_count": 29719 }
426
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # How to contribute to 🤗 Accelerate? Everyone is welcome to contribute, and we value everybody's contribution. Code is thus not the only way to help the community. Answering questions, helping others, reaching out, and improving the documentation are immensely valuable to the community. It also helps us if you spread the word: reference the library from blog posts on the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply star the repo to say "thank you". Whichever way you choose to contribute, please be mindful to respect our [code of conduct](https://github.com/huggingface/accelerate/blob/main/CODE_OF_CONDUCT.md). ## You can contribute in so many ways! Some of the ways you can contribute to Accelerate: * Fixing outstanding issues with the existing code; * Contributing to the examples or to the documentation; * Submitting issues related to bugs or desired new features. ## Submitting a new issue or feature request Do your best to follow these guidelines when submitting an issue or a feature request. It will make it easier for us to come back to you quickly and with good feedback. ### Did you find a bug? The 🤗 Accelerate library is robust and reliable thanks to the users who notify us of the problems they encounter. So thank you for reporting an issue. First, we would really appreciate it if you could **make sure the bug was not already reported** (use the search bar on GitHub under Issues). Did not find it? :( So we can act quickly on it, please follow these steps: * Include your **OS type and version**, and the versions of **Python** and **PyTorch**; * Provide a short, self-contained code snippet that allows us to reproduce the bug in less than 30s; * Provide us with your Accelerate configuration (located by default in `~/.cache/huggingface/accelerate/default_config.yaml`). ### Do you want a new feature? A good feature request addresses the following points: 1. Motivation first: * Is it related to a problem/frustration with the library? If so, please explain why. Providing a code snippet that demonstrates the problem is best. * Is it related to something you would need for a project? We'd love to hear about it! * Is it something you worked on and think could benefit the community? Awesome! Tell us what problem it solved for you. 2. Write a *full paragraph* describing the feature; 3. Provide a **code snippet** that demonstrates its future use; 4. In case this is related to a paper, please attach a link; 5. Attach any additional information (drawings, screenshots, etc.) you think may help. If your issue is well written, we're already 80% of the way there by the time you post it. ## Submitting a pull request (PR) Before writing code, we strongly advise you to search through the existing PRs or issues to make sure that nobody is already working on the same thing. If you are unsure, it is always a good idea to open an issue to get some feedback.
You will need basic `git` proficiency to be able to contribute to 🤗 Accelerate. `git` is not the easiest tool to use but it has the greatest manual. Type `git --help` in a shell and enjoy. If you prefer books, [Pro Git](https://git-scm.com/book/en/v2) is a very good reference. Follow these steps to start contributing: 1. Fork the [repository](https://github.com/huggingface/accelerate) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote. The following command assumes you have your public SSH key uploaded to GitHub. See the following guide for more [information](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository). ```bash $ git clone git@github.com:<your Github handle>/accelerate.git $ cd accelerate $ git remote add upstream https://github.com/huggingface/accelerate.git ``` 3. Create a new branch to hold your development changes, and do this for every new PR you work on. Start by synchronizing your `main` branch with the `upstream/main` branch (more details in the [GitHub Docs](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork)): ```bash $ git checkout main $ git fetch upstream $ git merge upstream/main ``` Once your `main` branch is synchronized, create a new branch from it: ```bash $ git checkout -b a-descriptive-name-for-my-changes ``` **Do not** work on the `main` branch. 4. Set up a development environment by running the following command in a conda or a virtual environment you've created for working on this library: ```bash $ pip install -e ".[dev]" ``` This will install all testing and linting/code quality dependencies for the library (see `quality`, `test_dev`, `test_prod` targets in [`setup.py`](./setup.py)). (If accelerate was already installed in the virtual environment, remove it with `pip uninstall accelerate` before reinstalling it in editable mode with the `-e` flag). Alternatively, if you are using [Visual Studio Code](https://code.visualstudio.com/Download), the fastest way to get set up is by using the provided Dev Container. Documentation on how to get started with dev containers is available [here](https://code.visualstudio.com/docs/remote/containers). 5. Develop the features on your branch. As you work on the features, you should make sure that the test suite passes. You should run the tests impacted by your changes like this (see below for an explanation regarding the environment variable): ```bash $ pytest tests/<TEST_TO_RUN>.py ``` > For the following commands leveraging the `make` utility, we recommend using the WSL system when running on > Windows. More information [here](https://docs.microsoft.com/en-us/windows/wsl/about). You can also run the full suite with the following command. ```bash $ make test ``` `accelerate` relies on `ruff` to format its source code consistently. After you make changes, apply automatic style corrections and code verifications that can't be automated in one go. This target is also optimized to only work with files modified by the PR you're working on. The following command applies the style corrections: ```bash $ make style ``` `accelerate` also uses a few custom scripts to check for coding mistakes.
Quality control runs in CI, however you can also run the same checks with: ```bash $ make quality ``` You can also set up [`pre-commit`](https://pre-commit.com/) to run these checks automatically as Git commit hooks. ```bash $ pip install pre-commit $ pre-commit install ``` Once you're happy with your changes, add changed files using `git add` and make a commit with `git commit` to record your changes locally: ```bash $ git add modified_file.py $ git commit ``` Please write [good commit messages](https://chris.beams.io/posts/git-commit/). It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes: ```bash $ git fetch upstream $ git rebase upstream/main ``` Push the changes to your account using: ```bash $ git push -u origin a-descriptive-name-for-my-changes ``` 6. Once you are satisfied (**and the checklist below is happy too**), go to the webpage of your fork on GitHub. Click on 'Pull request' to send your changes to the project maintainers for review. 7. It's ok if maintainers ask you for changes. It happens to core contributors too! So everyone can see the changes in the Pull request, work in your local branch and push the changes to your fork. They will automatically appear in the pull request. ### Checklist 1. The title of your pull request should be a summary of its contribution; 2. If your pull request addresses an issue, please mention the issue number in the pull request description to make sure they are linked (and people consulting the issue know you are working on it); 3. To indicate a work in progress please prefix the title with `[WIP]`, or mark the PR as a draft PR. These are useful to avoid duplicated work, and to differentiate it from PRs ready to be merged; 4. Make sure existing tests pass; 5. Add high-coverage tests. No quality testing = no merge. See an example of a good PR here: https://github.com/huggingface/accelerate/pull/255 ### Tests An extensive test suite is included to test the library behavior and several examples. Library tests can be found in the [tests folder](https://github.com/huggingface/accelerate/tree/main/tests). We use `pytest` in order to run the tests. From the root of the repository, here's how to run tests with `pytest` for the library: ```bash $ python -m pytest -sv ./tests ``` In fact, that's how `make test` is implemented (sans the `pip install` line)! You can specify a smaller set of tests in order to test only the feature you're working on.
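For example, to narrow a run to tests matching a keyword, `pytest` can also be invoked from Python. A minimal sketch, where `"checkpoint"` is only a placeholder for whatever feature you are working on:

```py
# Equivalent to `python -m pytest -sv ./tests -k "checkpoint"` on the command line.
import pytest

exit_code = pytest.main(["-sv", "./tests", "-k", "checkpoint"])
```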
accelerate/CONTRIBUTING.md/0
{ "file_path": "accelerate/CONTRIBUTING.md", "repo_id": "accelerate", "token_count": 2693 }
0
# Builds CPU-only Docker image of PyTorch # Uses a multi-stage approach to reduce size # Stage 1 FROM python:3.8-slim AS compile-image ARG DEBIAN_FRONTEND=noninteractive RUN apt update RUN apt-get install -y --no-install-recommends \ build-essential \ git \ gcc # Setup virtual environment for Docker ENV VIRTUAL_ENV=/opt/venv RUN python3 -m venv ${VIRTUAL_ENV} # Make sure we use the virtualenv ENV PATH="${VIRTUAL_ENV}/bin:$PATH" WORKDIR /workspace # Install specific CPU torch wheel to save on space RUN python3 -m pip install --upgrade --no-cache-dir pip RUN python3 -m pip install --no-cache-dir \ jupyter \ git+https://github.com/huggingface/accelerate#egg=accelerate[testing,test_trackers] \ --extra-index-url https://download.pytorch.org/whl/cpu # Stage 2 FROM python:3.8-slim AS build-image COPY --from=compile-image /opt/venv /opt/venv RUN useradd -ms /bin/bash user USER user # Make sure we use the virtualenv ENV PATH="/opt/venv/bin:$PATH" CMD ["/bin/bash"]
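# Illustrative sanity check (an assumption, not part of the original image
# definition): from a Python shell inside the built container, run
#   import torch, accelerate
#   print(accelerate.__version__, torch.__version__, torch.cuda.is_available())
# On this CPU-only image, torch.cuda.is_available() is expected to be False.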
accelerate/docker/accelerate-cpu/Dockerfile/0
{ "file_path": "accelerate/docker/accelerate-cpu/Dockerfile", "repo_id": "accelerate", "token_count": 380 }
1
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Moving between FSDP and DeepSpeed 🤗 Accelerate offers flexibility of training frameworks by integrating two extremely powerful tools for distributed training, namely [PyTorch FSDP](../usage_guides/fsdp) and [Microsoft DeepSpeed](../usage_guides/deepspeed). The aim of this tutorial is to draw parallels, as well as to outline potential differences, to empower the user to switch seamlessly between these two frameworks. <Tip> To switch between the frameworks, we recommend launching code with 🤗 `accelerate launch`, passing in the correct config file with `--config_file`, or passing in the respective arguments directly for [FSDP and DeepSpeed](../package_reference/cli#accelerate-launch). Example 🤗 Accelerate configurations can be found here for [DeepSpeed](../usage_guides/deepspeed#accelerate-deepspeed-plugin) and [FSDP](../usage_guides/fsdp#how-it-works-out-of-the-box), or in the [example zoo under "Launch Configurations"](../usage_guides/explore). </Tip> <Tip warning={true}> This tutorial is for single-node, multi-GPU scenarios only. </Tip> ## Configuring Functionalities Model tensors are split across different GPUs in an attempt to scale up model sizes; this is termed *sharding* in FSDP, and *partitioning* in DeepSpeed. FSDP sharding and DeepSpeed ZeRO (partitioning) stages are configured by `--fsdp_sharding_strategy` and `--zero_stage`, respectively. In particular, FSDP `FULL_SHARD` maps to DeepSpeed ZeRO stage `3`; see this [comprehensive mapping between FSDP sharding and DeepSpeed ZeRO settings](../usage_guides/fsdp#mapping-between-fsdp-sharding-strategies-and-deepspeed-zero-stages). The below table summarizes and groups similar settings: Group | Framework | Configuration | Example | Restrictions (if any) --|--|--|--|-- sharding / partitioning | FSDP<br>DeepSpeed | `--fsdp_sharding_strategy`<br>`--zero_stage` | `1` (`FULL_SHARD`) <br>`3` | offload | FSDP<br>DeepSpeed | `--fsdp_offload_params`<br>`--offload_param_device`<br>`--offload_optimizer_device` | `true`<br>`cpu`<br>`cpu` | all or nothing <br><br> model loading | FSDP<br>DeepSpeed | <span style="white-space:nowrap;">`--fsdp_cpu_ram_efficient_loading`</span><br>`--zero3_init_flag` | `true`<br>`true` | <br>only ZeRO 3 efficient checkpointing | FSDP<br>DeepSpeed | `--fsdp_state_dict_type`<br>`--zero3_save_16bit_model` | `SHARDED_STATE_DICT`<br>`true` | <br>only ZeRO 3 weights prefetching | FSDP<br><br>DeepSpeed | `--fsdp_forward_prefetch`<br>`--fsdp_backward_prefetch`<br>None | `true`<br>`BACKWARD_PRE` | <br><br> model | FSDP<br><br>DeepSpeed | `--fsdp_auto_wrap_policy`<br><span style="white-space:nowrap;">`--fsdp_transformer_layer_cls_to_wrap`</span><br>None | `TRANSFORMER_BASED_WRAP`<br><Layer Class> |<br>Usually not needed <br>Transparent to user.
parameters summoning | FSDP<br>DeepSpeed | `--fsdp_use_orig_params`<br>None | `true` | required for `torch.compile`<br>Transparent to user parameters syncing | FSDP<br>DeepSpeed | `--fsdp_sync_module_states`<br>None | `true` | training | FSDP<br>DeepSpeed | None<br>`--gradient_accumulation_steps`<br>`--gradient_clipping` | <br>`auto`<br>`auto` | Transparent to user For detailed descriptions of the above, refer to the [🤗 `Accelerate` launch documentation](../package_reference/cli#accelerate-launch). <Tip> To access other DeepSpeed configurations, such as mixed precision settings, you need to pass in a `--deepspeed_config_file`, see the [documentation](../usage_guides/deepspeed#deepspeed-config-file). DeepSpeed can also be configured via [`DeepSpeedPlugin`], e.g., `DeepSpeedPlugin.zero_stage` is equivalent to `--zero_stage`, and `DeepSpeedPlugin.hf_ds_config` can be used to pass `--deepspeed_config_file`. </Tip> <Tip> FSDP can also be configured via [`FullyShardedDataParallelPlugin`], e.g., `FullyShardedDataParallelPlugin.sharding_strategy` is equivalent to `--fsdp_sharding_strategy`. </Tip> ### Checkpointing Do note that FSDP can be configured via `--fsdp_state_dict_type` to save either full or sharded checkpoints. <Tip> For DeepSpeed Zero3, one could pass a `--zero3_save_16bit_model true`, which conveniently consolidates the model to a single rank and saves; this is the FSDP equivalent of `fsdp_state_dict_type: FULL_STATE_DICT`. </Tip> <Tip warning={true}> For large models, consolidating the model to a single rank can be very slow. </Tip> <Tip> For quicker checkpointing, for FSDP use `fsdp_state_dict_type: SHARDED_STATE_DICT`, and for DeepSpeed Zero3 [use the `zero_to_fp32.py` script to post-convert sharded checkpoints](https://www.deepspeed.ai/tutorials/zero/#extracting-weights). </Tip> ### Offloading FSDP only allows *all-or-nothing* offload (i.e., either offload parameters, gradients, and optimizer states, or keep them all in GPU), but DeepSpeed can offload parameters and optimizer states independently. Furthermore, DeepSpeed also supports [offloading to NVME](https://www.deepspeed.ai/docs/config-json/#parameter-offloading). ### Prefetching FSDP allows two prefetching configurations, `--fsdp_forward_prefetch` and `--fsdp_backward_prefetch`, to improve overlap of comms / computation at a cost of extra memory, see the [FSDP documentation](https://pytorch.org/docs/stable/fsdp.html). For DeepSpeed, the prefetching will be turned on when needed, and it turns on depending on certain hyper-params like `stage3_param_persistence_threshold`, `stage3_max_reuse_distance`, etc., [that can be configured for Zero3](https://www.deepspeed.ai/docs/config-json/#parameter-offloading); 🤗 `accelerate` may set these hyper-params automatically if you don't set those explicitly in the deepspeed config file. <Tip> For FSDP set `fsdp_backward_prefetch: BACKWARD_PRE` for improved throughput if memory allows. </Tip> ### Model Loading While FSDP requires an explicit `--fsdp_cpu_ram_efficient_loading true` to activate efficient model loading, 🤗 `transformers` will activate a similar feature whenever DeepSpeed Zero3 is used. <Tip> For FSDP, whenever setting `--fsdp_cpu_ram_efficient_loading true`, 🤗 `accelerate` will automatically set `sync_module_states` to true. For RAM efficient loading the weights will be loaded only on a single rank, which thus requires `sync_module_states` to broadcast the weights to other ranks.
</Tip> ### Model FSDP requires an explicit `--fsdp_auto_wrap_policy` for the algorithm to decide how to schedule the all-gather and reduce-scatter operations. But for DeepSpeed this is transparent to the user. <Tip> For FSDP, simply set `fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP`. With the latest [`transformers`] versions, we try our best to figure out the suitable `fsdp_transformer_layer_cls_to_wrap` for HF transformers models. However, if you get an error regarding it, please specify this. </Tip> ### Parameters Summoning FSDP requires an explicit `--fsdp_use_orig_params` flag if using `torch.compile`, see [the PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html#module-torch.distributed.fsdp). For DeepSpeed this is transparent to the user. <Tip> For FSDP, when using `torch.compile` please set `fsdp_use_orig_params: True`. </Tip> ## Training DeepSpeed requires explicit `--gradient_accumulation_steps` and `--gradient_clipping` flags. For FSDP this is transparent to the user. <Tip> When using DeepSpeed, set `gradient_accumulation_steps: "auto"` and `gradient_clipping: "auto"` to automatically pick up values set in the [`Accelerator`] or [`TrainingArguments`] (if using `transformers`). </Tip> ## On Differences in Data Precision Handling To discuss how data precision is handled in both FSDP and DeepSpeed, it is instructive to first give an overview of how model parameters are handled in these frameworks. Before the model / optimizer parameters are distributed across GPUs, parameter preparation is involved to first "flatten" them to one-dimensional [`torch.Tensor`](https://pytorch.org/docs/stable/tensors.html#torch-tensor). The implementation of FSDP / DeepSpeed varies with respect to the `dtype` in which these "flattened" parameters are stored, and there are ramifications with regard to how the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) allocates its `dtype`s. The table below outlines the processes for both frameworks; the "Local" column indicates the process occurring at a per-GPU level, therefore any memory overheads from upcasting should be understood to be amortized by the number of GPUs used. <Tip> As a rule of thumb, for stable training with automatic mixed precision, all the trainable parameters have to be in `torch.float32`. </Tip> Process | Local | Framework | Details --|--|--|-- Loading, i.e., [`AutoModel.from_pretrained(..., torch_dtype=torch_dtype)`] | Preparation, i.e., creation of "flat params" | ✅ | FSDP<br>DeepSpeed | created in `torch_dtype`.<br> disregards `torch_dtype`, created in `float32`. Optimizer initialization | ✅ | FSDP<br>DeepSpeed | creates parameters in `torch_dtype`<br> creates parameters in `float32` Training Step, i.e., forward, backward, reduction | | FSDP<br>DeepSpeed | follows [`MixedPrecision`](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.MixedPrecision)<br> follows `deepspeed_config_file` mixed precision settings. Optimizer (Pre-Step) | ✅ | FSDP<br>DeepSpeed | upcasting (if any) to `torch_dtype`<br>upcasted to `float32` Optimizer (Actual Step) | ✅ | FSDP<br>DeepSpeed | occurs in `torch_dtype` <br> occurs in `float32`. <Tip warning={true}> Therefore, when using DeepSpeed with a small number of GPUs, be aware of potentially significant memory overheads due to the upcasting during preparation.
</Tip> <Tip> With FSDP, in the absence of mixed precision, it is possible to operate the [`torch.Optimizer`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) in low precision `torch_dtype`, which may be helpful when using a small number of GPUs. </Tip> <Tip warning={true}> With mixed precision, FSDP and DeepSpeed will upcast in the model preparation step (cf. the table above). But do note that FSDP will then save checkpoints in the upcasted precision; DeepSpeed may still save low precision checkpoints if `--zero3_save_16bit_model` is specified. </Tip> To clarify the above table, consider the concrete examples below; the optimizer pre-step and actual step are combined for brevity. With FSDP it is possible to operate in the two modes shown below, but DeepSpeed can only operate in one. Framework | Model Loading (`torch_dtype`) | Mixed Precision | Preparation (Local) | Training | Optimizer (Local) --|--|--|--|--|-- FSDP | bf16 | default (none) | bf16 | bf16 | bf16 FSDP | bf16 | bf16 | fp32 | bf16 | fp32 DeepSpeed | bf16 | bf16 | fp32 | bf16 | fp32
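As a minimal sketch of the plugin-based configuration mentioned in the tips above (the argument values are illustrative, not recommendations, and you would use one plugin or the other in a given process, not both), switching frameworks from Python instead of a config file could look like:

```py
from accelerate import Accelerator, DeepSpeedPlugin, FullyShardedDataParallelPlugin
from torch.distributed.fsdp import ShardingStrategy

# DeepSpeed: ZeRO stage 3 with explicit gradient settings.
ds_plugin = DeepSpeedPlugin(zero_stage=3, gradient_accumulation_steps=1, gradient_clipping=1.0)
accelerator = Accelerator(deepspeed_plugin=ds_plugin)

# ...or FSDP: FULL_SHARD is the ZeRO stage 3 analogue (see the mapping above).
fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=ShardingStrategy.FULL_SHARD)
accelerator = Accelerator(fsdp_plugin=fsdp_plugin)
```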
accelerate/docs/source/concept_guides/fsdp_and_deepspeed.md/0
{ "file_path": "accelerate/docs/source/concept_guides/fsdp_and_deepspeed.md", "repo_id": "accelerate", "token_count": 3547 }
2
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Fully Sharded Data Parallel To accelerate training huge models on larger batch sizes, we can use a fully sharded data parallel model. This type of data parallel paradigm enables fitting more data and larger models by sharding the optimizer states, gradients and parameters. To read more about it and the benefits, check out the [Fully Sharded Data Parallel blog](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/). We have integrated the latest PyTorch Fully Sharded Data Parallel (FSDP) training feature. All you need to do is enable it through the config. ## How it works out of the box On your machine(s) just run: ```bash accelerate config ``` and answer the questions asked. This will generate a config file that will be used automatically to properly set the default options when doing ```bash accelerate launch my_script.py --args_to_my_script ``` For instance, here is how you would run `examples/nlp_example.py` (from the root of the repo) with FSDP enabled: ```yaml compute_environment: LOCAL_MACHINE debug: false distributed_type: FSDP downcast_bf16: 'no' fsdp_config: fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP fsdp_backward_prefetch_policy: BACKWARD_PRE fsdp_forward_prefetch: false fsdp_cpu_ram_efficient_loading: true fsdp_offload_params: false fsdp_sharding_strategy: FULL_SHARD fsdp_state_dict_type: SHARDED_STATE_DICT fsdp_sync_module_states: true fsdp_transformer_layer_cls_to_wrap: BertLayer fsdp_use_orig_params: true machine_rank: 0 main_training_function: main mixed_precision: bf16 num_machines: 1 num_processes: 2 rdzv_backend: static same_network: true tpu_env: [] tpu_use_cluster: false tpu_use_sudo: false use_cpu: false ``` ```bash accelerate launch examples/nlp_example.py ``` Currently, `Accelerate` supports the following config through the CLI: `fsdp_sharding_strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD (DDP), [4] HYBRID_SHARD (shards optimizer states, gradients and parameters within each node while each node has a full copy), [5] HYBRID_SHARD_ZERO2 (shards optimizer states and gradients within each node while each node has a full copy). For more information, please refer to the official [PyTorch docs](https://pytorch.org/docs/stable/fsdp.html#torch.distributed.fsdp.ShardingStrategy). `fsdp_offload_params`: Whether to offload parameters and gradients to CPU. `fsdp_auto_wrap_policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP `fsdp_transformer_layer_cls_to_wrap`: Only applicable for 🤗 Transformers.
When using `fsdp_auto_wrap_policy=TRANSFORMER_BASED_WRAP`, a user may provide a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`. This is important because submodules that share weights (e.g., embedding layers) should not end up in different FSDP wrapped units. Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers. Remaining layers, including the shared embeddings, are conveniently wrapped in the same outermost FSDP unit. Therefore, use this for transformer-based models. You can use `model._no_split_modules` for 🤗 Transformers models by answering `yes` to `Do you want to use the model's _no_split_modules to wrap`. It will try to use `model._no_split_modules` when possible. `fsdp_min_num_params`: minimum number of parameters when using `fsdp_auto_wrap_policy=SIZE_BASED_WRAP`. `fsdp_backward_prefetch_policy`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH `fsdp_forward_prefetch`: if True, then FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass. Should only be used for static-graph models since the prefetching follows the first iteration's execution order, i.e., if the sub-modules' order changes dynamically during the model's execution, do not enable this feature. `fsdp_state_dict_type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT `fsdp_use_orig_params`: If True, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This setting is useful in cases such as parameter-efficient fine-tuning as discussed in [this post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019). This option also allows one to have multiple optimizer param groups. This should be `True` when creating an optimizer before preparing/wrapping the model with FSDP. `fsdp_cpu_ram_efficient_loading`: Only applicable for 🤗 Transformers models. If True, only the first process loads the pretrained model checkpoint while all other processes have empty weights. This should be set to False if you experience errors when loading the pretrained 🤗 Transformers model via the `from_pretrained` method. When this setting is True, `fsdp_sync_module_states` must also be True, otherwise all the processes except the main process would have random weights, leading to unexpected behaviour during training. For this to work, make sure the distributed process group is initialized before calling the Transformers `from_pretrained` method. When using the 🤗 Trainer API, the distributed process group is initialized when you create an instance of the `TrainingArguments` class. `fsdp_sync_module_states`: If True, each individually wrapped FSDP unit will broadcast module parameters from rank 0. For additional and more nuanced control, you can specify other FSDP parameters via `FullyShardedDataParallelPlugin`. When creating the `FullyShardedDataParallelPlugin` object, pass it the parameters that weren't part of the accelerate config or that you want to override. The FSDP parameters will be picked based on the accelerate config file or launch command arguments, and the parameters that you pass directly through the `FullyShardedDataParallelPlugin` object will set/override those.
Below is an example: ```py from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullOptimStateDictConfig, FullStateDictConfig fsdp_plugin = FullyShardedDataParallelPlugin( state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False), optim_state_dict_config=FullOptimStateDictConfig(offload_to_cpu=False, rank0_only=False), ) accelerator = Accelerator(fsdp_plugin=fsdp_plugin) ``` ## Saving and loading The new recommended way of checkpointing when using FSDP models is to use `SHARDED_STATE_DICT` as `StateDictType` when setting up the accelerate config. Below is the code snippet to save using `save_state` utility of accelerate. ```py accelerator.save_state("ckpt") ``` Inspect the checkpoint folder to see model and optimizer as shards per process: ``` ls ckpt # optimizer_0 pytorch_model_0 random_states_0.pkl random_states_1.pkl scheduler.bin cd ckpt ls optimizer_0 # __0_0.distcp __1_0.distcp ls pytorch_model_0 # __0_0.distcp __1_0.distcp ``` To load them back for resuming the training, use the `load_state` utility of accelerate ```py accelerator.load_state("ckpt") ``` When using transformers `save_pretrained`, pass `state_dict=accelerator.get_state_dict(model)` to save the model state dict. Below is an example: ```diff unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, + state_dict=accelerator.get_state_dict(model), ) ``` ### State Dict `accelerator.get_state_dict` will call the underlying `model.state_dict` implementation using `FullStateDictConfig(offload_to_cpu=True, rank0_only=True)` context manager to get the state dict only for rank 0 and it will be offloaded to CPU. You can then pass `state` into the `save_pretrained` method. There are several modes for `StateDictType` and `FullStateDictConfig` that you can use to control the behavior of `state_dict`. For more information, see the [PyTorch documentation](https://pytorch.org/docs/stable/fsdp.html). If you choose to use `StateDictType.SHARDED_STATE_DICT`, the weights of the model during `Accelerator.save_state` will be split into `n` files for each sub-split on the model. To merge them back into a single dictionary to load back into the model later after training you can use the `merge_weights` utility: ```py from accelerate.utils import merge_fsdp_weights # Our weights are saved usually in a `pytorch_model_fsdp_{model_number}` folder merge_fsdp_weights("pytorch_model_fsdp_0", "output_path", safe_serialization=True) ``` The final output will then either be saved to `model.safetensors` or `pytorch_model.bin` (if `safe_serialization=False` is passed). This can also be called using the CLI: ```bash accelerate merge-weights pytorch_model_fsdp_0/ output_path ``` ## Mapping between FSDP sharding strategies and DeepSpeed ZeRO Stages * `FULL_SHARD` maps to the DeepSpeed `ZeRO Stage-3`. Shards optimizer states, gradients and parameters. * `SHARD_GRAD_OP` maps to the DeepSpeed `ZeRO Stage-2`. Shards optimizer states and gradients. * `NO_SHARD` maps to `ZeRO Stage-0`. No sharding wherein each GPU has full copy of model, optimizer states and gradients. * `HYBRID_SHARD` maps to `ZeRO++ Stage-3` wherein `zero_hpz_partition_size=<num_gpus_per_node>`. Here, this will shard optimizer states, gradients and parameters within each node while each node has full copy. 
## A few caveats to be aware of

- In case of multiple models, pass the optimizers to the prepare call in the same order as their corresponding models, otherwise `accelerator.save_state()` and `accelerator.load_state()` will result in wrong/unexpected behaviour (see the sketch below).
- This feature is incompatible with `--predict_with_generate` in the `run_translation.py` script of the 🤗 `Transformers` library.

For more control, users can leverage the `FullyShardedDataParallelPlugin`. After creating an instance of this class, users can pass it to the Accelerator class instantiation. For more information on these options, please refer to the PyTorch [FullyShardedDataParallel](https://github.com/pytorch/pytorch/blob/0df2e863fbd5993a7b9e652910792bd21a516ff3/torch/distributed/fsdp/fully_sharded_data_parallel.py#L236) code.

<Tip>

For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)!

</Tip>
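Below is a minimal sketch of the first caveat; the two linear models are hypothetical stand-ins for your own modules:

```py
import torch

from accelerate import Accelerator

accelerator = Accelerator()

# Two hypothetical models; what matters is the ordering in `prepare`
model_a = torch.nn.Linear(8, 8)
model_b = torch.nn.Linear(8, 8)
opt_a = torch.optim.AdamW(model_a.parameters(), lr=1e-4)
opt_b = torch.optim.AdamW(model_b.parameters(), lr=1e-4)

# Correct: optimizers are passed in the same order as their corresponding models,
# so `save_state()`/`load_state()` can match them back up later
model_a, model_b, opt_a, opt_b = accelerator.prepare(model_a, model_b, opt_a, opt_b)
```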
accelerate/docs/source/usage_guides/fsdp.md/0
{ "file_path": "accelerate/docs/source/usage_guides/fsdp.md", "repo_id": "accelerate", "token_count": 3362 }
3
# Config Zoo

This folder contains a variety of minimal configurations for `Accelerate` that achieve certain goals. You can use these config YAMLs directly, or build off of them for your own YAMLs.

These are highly annotated versions, aiming to teach you what each section does.

Each config can be run via `accelerate launch --config_file {file} run_me.py`

`run_me.py` will then print out how the current environment is set up (the contents of the `AcceleratorState`)
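For orientation, here is a rough stand-in for what such a `run_me.py` might contain (an illustrative sketch, not the actual script):

```py
from accelerate import Accelerator

accelerator = Accelerator()
# Printing the state shows how the current environment is configured
print(accelerator.state)
```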
accelerate/examples/config_yaml_templates/README.md/0
{ "file_path": "accelerate/examples/config_yaml_templates/README.md", "repo_id": "accelerate", "token_count": 124 }
4
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from diffusers import DiffusionPipeline from accelerate import PartialState # Can also be Accelerator or AcceleratorState pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) distributed_state = PartialState() pipe.to(distributed_state.device) # Assume two processes # On the first GPU, the prompts will be ["a dog", "a cat"], # and on the second GPU it will be ["a chicken", "a chicken"]. # Make sure to drop the final sample, as it will be a duplicate of the previous one. with distributed_state.split_between_processes(["a dog", "a cat", "a chicken"], apply_padding=True) as prompt: result = pipe(prompt).images
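
# Illustrative follow-up (not part of the original script): each process could save
# its own generated images. The filename pattern below is an assumption.
for i, image in enumerate(result):
    image.save(f"result_{distributed_state.process_index}_{i}.png")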
accelerate/examples/inference/distributed/stable_diffusion.py/0
{ "file_path": "accelerate/examples/inference/distributed/stable_diffusion.py", "repo_id": "accelerate", "token_count": 363 }
5
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from manim import * class Stage2(Scene): def construct(self): mem = Rectangle(height=0.5,width=0.5) fill = Rectangle(height=0.46,width=0.46).set_stroke(width=0) cpu_left_col_base = [mem.copy() for i in range(6)] cpu_right_col_base = [mem.copy() for i in range(6)] cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0) cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0) cpu_rects = VGroup(cpu_left_col,cpu_right_col).arrange(RIGHT, buff=0) cpu_text = Text("CPU", font_size=24) cpu = Group(cpu_rects,cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) cpu.move_to([-2.5,-.5,0]) self.add(cpu) gpu_base = [mem.copy() for i in range(4)] gpu_rect = VGroup(*gpu_base).arrange(UP,buff=0) gpu_text = Text("GPU", font_size=24) gpu = Group(gpu_rect,gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) gpu.move_to([-1,-1,0]) self.add(gpu) model_base = [mem.copy() for i in range(6)] model_rect = VGroup(*model_base).arrange(RIGHT,buff=0) model_text = Text("Model", font_size=24) model = Group(model_rect,model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN) model.move_to([3, -1., 0]) self.add(model) cpu_targs = [] for i,rect in enumerate(model_base): rect.set_stroke(YELLOW) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) cpu_target = Rectangle(height=0.46/4,width=0.46/3).set_stroke(width=0.).set_fill(YELLOW, opacity=0.7) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN+LEFT), buff=0.02, direction=UP) cpu_target.set_x(cpu_target.get_x()+0.1) elif i == 3: cpu_target.next_to(cpu_targs[0], direction=UP, buff=0.) else: cpu_target.next_to(cpu_targs[i-1], direction=RIGHT, buff=0.) 
self.add(cpu_target) cpu_targs.append(cpu_target) checkpoint_base = [mem.copy() for i in range(6)] checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT,buff=0) checkpoint_text = Text("Loaded Checkpoint", font_size=24) checkpoint = Group(checkpoint_rect,checkpoint_text).arrange(DOWN, aligned_edge=DOWN, buff=0.4) checkpoint.move_to([3, .5, 0]) key = Square(side_length=2.2) key.move_to([-5, 2, 0]) key_text = MarkupText( f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, ) key_text.move_to([-5, 2.4, 0]) self.add(key_text, key) blue_text = MarkupText( f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18, ) blue_text.next_to(key_text, DOWN*2.4, aligned_edge=key_text.get_left()) step_2 = MarkupText( f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.', font_size=24 ) step_2.move_to([2, 2, 0]) self.play( Write(step_2), Write(blue_text) ) self.play( Write(checkpoint_text, run_time=1), Create(checkpoint_rect, run_time=1) ) first_animations = [] second_animations = [] for i,rect in enumerate(checkpoint_base): target = fill.copy().set_fill(BLUE, opacity=0.7) target.move_to(rect) first_animations.append(GrowFromCenter(target, run_time=1)) cpu_target = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i+1]) else: cpu_target.target.move_to(cpu_right_col_base[i-5]) second_animations.append(MoveToTarget(cpu_target, run_time=1.5)) self.play(*first_animations) self.play(*second_animations) self.wait()
accelerate/manim_animations/big_model_inference/stage_2.py/0
{ "file_path": "accelerate/manim_animations/big_model_inference/stage_2.py", "repo_id": "accelerate", "token_count": 2354 }
6
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from contextlib import contextmanager
from functools import wraps
from typing import Dict, List, Optional, Union

import torch
import torch.nn as nn

from .hooks import (
    AlignDevicesHook,
    CpuOffload,
    UserCpuOffloadHook,
    add_hook_to_module,
    attach_align_device_hook,
    attach_align_device_hook_on_blocks,
)
from .utils import (
    OffloadedWeightsLoader,
    check_cuda_p2p_ib_support,
    check_device_map,
    extract_submodules_state_dict,
    find_tied_parameters,
    get_balanced_memory,
    infer_auto_device_map,
    is_mlu_available,
    is_musa_available,
    is_npu_available,
    is_torch_version,
    is_xpu_available,
    load_checkpoint_in_model,
    offload_state_dict,
    parse_flag_from_env,
    retie_parameters,
)
from .utils.other import recursive_getattr


logger = logging.getLogger(__name__)


@contextmanager
def init_empty_weights(include_buffers: bool = None):
    """
    A context manager under which models are initialized with all parameters on the meta device, therefore creating an
    empty model. Useful when just initializing the model would blow the available RAM.

    Args:
        include_buffers (`bool`, *optional*):
            Whether or not to also put all buffers on the meta device while initializing.

    Example:

    ```python
    import torch.nn as nn
    from accelerate import init_empty_weights

    # Initialize a model with 100 billion parameters in no time and without using any RAM.
    with init_empty_weights():
        tst = nn.Sequential(*[nn.Linear(10000, 10000) for _ in range(1000)])
    ```

    <Tip warning={true}>

    Any model created under this context manager has no weights. As such you can't do something like
    `model.to(some_device)` with it. To load weights inside your empty model, see [`load_checkpoint_and_dispatch`].
    Make sure to overwrite the default device_map param for [`load_checkpoint_and_dispatch`], otherwise dispatch is not
    called.

    </Tip>
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)
    with init_on_device(torch.device("meta"), include_buffers=include_buffers) as f:
        yield f


@contextmanager
def init_on_device(device: torch.device, include_buffers: bool = None):
    """
    A context manager under which models are initialized with all parameters on the specified device.

    Args:
        device (`torch.device`):
            Device to initialize all parameters on.
        include_buffers (`bool`, *optional*):
            Whether or not to also put all buffers on the specified device while initializing.
    Example:

    ```python
    import torch.nn as nn
    from accelerate import init_on_device

    with init_on_device(device=torch.device("cuda")):
        tst = nn.Linear(100, 100)  # on `cuda` device
    ```
    """
    if include_buffers is None:
        include_buffers = parse_flag_from_env("ACCELERATE_INIT_INCLUDE_BUFFERS", False)

    # TODO(shingjan): remove the torch version check once older versions are deprecated
    if is_torch_version(">=", "2.0") and include_buffers:
        with device:
            yield
        return

    old_register_parameter = nn.Module.register_parameter
    if include_buffers:
        old_register_buffer = nn.Module.register_buffer

    def register_empty_parameter(module, name, param):
        old_register_parameter(module, name, param)
        if param is not None:
            param_cls = type(module._parameters[name])
            kwargs = module._parameters[name].__dict__
            kwargs["requires_grad"] = param.requires_grad
            module._parameters[name] = param_cls(module._parameters[name].to(device), **kwargs)

    def register_empty_buffer(module, name, buffer, persistent=True):
        old_register_buffer(module, name, buffer, persistent=persistent)
        if buffer is not None:
            module._buffers[name] = module._buffers[name].to(device)

    # Patch tensor creation
    if include_buffers:
        tensor_constructors_to_patch = {
            torch_function_name: getattr(torch, torch_function_name)
            for torch_function_name in ["empty", "zeros", "ones", "full"]
        }
    else:
        tensor_constructors_to_patch = {}

    def patch_tensor_constructor(fn):
        def wrapper(*args, **kwargs):
            kwargs["device"] = device
            return fn(*args, **kwargs)

        return wrapper

    try:
        nn.Module.register_parameter = register_empty_parameter
        if include_buffers:
            nn.Module.register_buffer = register_empty_buffer
        for torch_function_name in tensor_constructors_to_patch.keys():
            setattr(torch, torch_function_name, patch_tensor_constructor(getattr(torch, torch_function_name)))
        yield
    finally:
        nn.Module.register_parameter = old_register_parameter
        if include_buffers:
            nn.Module.register_buffer = old_register_buffer
        for torch_function_name, old_torch_function in tensor_constructors_to_patch.items():
            setattr(torch, torch_function_name, old_torch_function)


def cpu_offload(
    model: nn.Module,
    execution_device: Optional[torch.device] = None,
    offload_buffers: bool = False,
    state_dict: Optional[Dict[str, torch.Tensor]] = None,
    preload_module_classes: Optional[List[str]] = None,
):
    """
    Activates full CPU offload for a model. As a result, all parameters of the model will be offloaded and only one
    copy of the state dict of the model will be kept. During the forward pass, parameters will be extracted from that
    state dict and put on the execution device passed as they are needed, then offloaded again.

    Args:
        model (`torch.nn.Module`): The model to offload.
        execution_device (`torch.device`, *optional*):
            The device on which the forward pass of the model will be executed (should be a GPU). Will default to the
            model's first parameter device.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to offload the buffers with the model parameters.
        state_dict (`Dict[str, torch.Tensor]`, *optional*):
            The state dict of the model that will be kept on CPU.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
""" if execution_device is None: execution_device = next(iter(model.parameters())).device if state_dict is None: state_dict = {n: p.to("cpu") for n, p in model.state_dict().items()} add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True) attach_align_device_hook( model, execution_device=execution_device, offload=True, offload_buffers=offload_buffers, weights_map=state_dict, preload_module_classes=preload_module_classes, ) return model def cpu_offload_with_hook( model: torch.nn.Module, execution_device: Optional[Union[int, str, torch.device]] = None, prev_module_hook: Optional[UserCpuOffloadHook] = None, ): """ Offloads a model on the CPU and puts it back to an execution device when executed. The difference with [`cpu_offload`] is that the model stays on the execution device after the forward and is only offloaded again when the `offload` method of the returned `hook` is called. Useful for pipelines running a model in a loop. Args: model (`torch.nn.Module`): The model to offload. execution_device(`str`, `int` or `torch.device`, *optional*): The device on which the model should be executed. Will default to the MPS device if it's available, then GPU 0 if there is a GPU, and finally to the CPU. prev_module_hook (`UserCpuOffloadHook`, *optional*): The hook sent back by this function for a previous model in the pipeline you are running. If passed, its offload method will be called just before the forward of the model to which this hook is attached. Example: ```py model_1, hook_1 = cpu_offload_with_hook(model_1, cuda_device) model_2, hook_2 = cpu_offload_with_hook(model_2, cuda_device, prev_module_hook=hook_1) model_3, hook_3 = cpu_offload_with_hook(model_3, cuda_device, prev_module_hook=hook_2) hid_1 = model_1(input) for i in range(50): # model1 is offloaded on the CPU at the first iteration, model 2 stays on the GPU for this whole loop. hid_2 = model_2(hid_1) # model2 is offloaded to the CPU just before this forward. hid_3 = model_3(hid_3) # For model3, you need to manually call the hook offload method. hook_3.offload() ``` """ hook = CpuOffload(execution_device=execution_device, prev_module_hook=prev_module_hook) add_hook_to_module(model, hook, append=True) user_hook = UserCpuOffloadHook(model, hook) return model, user_hook def disk_offload( model: nn.Module, offload_dir: Union[str, os.PathLike], execution_device: Optional[torch.device] = None, offload_buffers: bool = False, preload_module_classes: Optional[List[str]] = None, ): """ Activates full disk offload for a model. As a result, all parameters of the model will be offloaded as memory-mapped array in a given folder. During the forward pass, parameters will be accessed from that folder and put on the execution device passed as they are needed, then offloaded again. Args: model (`torch.nn.Module`): The model to offload. offload_dir (`str` or `os.PathLike`): The folder in which to offload the model weights (or where the model weights are already offloaded). execution_device (`torch.device`, *optional*): The device on which the forward pass of the model will be executed (should be a GPU). Will default to the model's first parameter device. offload_buffers (`bool`, *optional*, defaults to `False`): Whether or not to offload the buffers with the model parameters. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. 
            This should only be used for classes that have submodules which are registered but not called directly
            during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight`
            and `dense.bias` are used in some operations instead of calling `dense` directly.
    """
    if not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")):
        offload_state_dict(offload_dir, model.state_dict())
    if execution_device is None:
        execution_device = next(iter(model.parameters())).device
    weights_map = OffloadedWeightsLoader(save_folder=offload_dir)

    add_hook_to_module(model, AlignDevicesHook(io_same_device=True), append=True)
    attach_align_device_hook(
        model,
        execution_device=execution_device,
        offload=True,
        offload_buffers=offload_buffers,
        weights_map=weights_map,
        preload_module_classes=preload_module_classes,
    )

    return model


def dispatch_model(
    model: nn.Module,
    device_map: Dict[str, Union[str, int, torch.device]],
    main_device: Optional[torch.device] = None,
    state_dict: Optional[Dict[str, torch.Tensor]] = None,
    offload_dir: Optional[Union[str, os.PathLike]] = None,
    offload_index: Optional[Dict[str, str]] = None,
    offload_buffers: bool = False,
    skip_keys: Optional[Union[str, List[str]]] = None,
    preload_module_classes: Optional[List[str]] = None,
    force_hooks: bool = False,
):
    """
    Dispatches a model according to a given device map. Layers of the model might be spread across GPUs, offloaded on
    the CPU or even the disk.

    Args:
        model (`torch.nn.Module`):
            The model to dispatch.
        device_map (`Dict[str, Union[str, int, torch.device]]`):
            A dictionary mapping module names in the model's `state_dict` to the device they should go to. Note that
            `"disk"` is accepted even if it's not a proper value for `torch.device`.
        main_device (`str`, `int` or `torch.device`, *optional*):
            The main execution device. Will default to the first device in the `device_map` different from `"cpu"` or
            `"disk"`.
        state_dict (`Dict[str, torch.Tensor]`, *optional*):
            The state dict of the part of the model that will be kept on CPU.
        offload_dir (`str` or `os.PathLike`):
            The folder in which to offload the model weights (or where the model weights are already offloaded).
        offload_index (`Dict`, *optional*):
            A dictionary from weight name to their information (`dtype`/ `shape` or safetensors filename). Will
            default to the index saved in `save_folder`.
        offload_buffers (`bool`, *optional*, defaults to `False`):
            Whether or not to offload the buffers with the model parameters.
        skip_keys (`str` or `List[str]`, *optional*):
            A list of keys to ignore when moving inputs or outputs between devices.
        preload_module_classes (`List[str]`, *optional*):
            A list of classes whose instances should load all their weights (even in the submodules) at the beginning
            of the forward. This should only be used for classes that have submodules which are registered but not
            called directly during the forward, for instance if a `dense` linear layer is registered, but at forward,
            `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly.
        force_hooks (`bool`, *optional*, defaults to `False`):
            Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a
            single device.
    """
    # Error early if the device map is incomplete.
    check_device_map(model, device_map)

    # for backward compatibility
    is_bnb_quantized = (
        getattr(model, "is_quantized", False) or getattr(model, "is_loaded_in_8bit", False)
    ) and getattr(model, "quantization_method", "bitsandbytes") == "bitsandbytes"

    # We attach hooks if the device_map has at least 2 different devices or if
    # force_hooks is set to `True`. Otherwise, the model is already loaded
    # on the unique device and the user can decide where to dispatch the model.
    # If the model is quantized, we always force-dispatch the model
    if (len(set(device_map.values())) > 1) or is_bnb_quantized or force_hooks:
        if main_device is None:
            if set(device_map.values()) == {"cpu"} or set(device_map.values()) == {"cpu", "disk"}:
                main_device = "cpu"
            else:
                main_device = [d for d in device_map.values() if d not in ["cpu", "disk"]][0]

        if main_device != "cpu":
            cpu_modules = [name for name, device in device_map.items() if device == "cpu"]
            if state_dict is None and len(cpu_modules) > 0:
                state_dict = extract_submodules_state_dict(model.state_dict(), cpu_modules)

        disk_modules = [name for name, device in device_map.items() if device == "disk"]
        if offload_dir is None and offload_index is None and len(disk_modules) > 0:
            raise ValueError(
                "We need an `offload_dir` to dispatch this model according to this `device_map`, the following submodules "
                f"need to be offloaded: {', '.join(disk_modules)}."
            )
        if (
            len(disk_modules) > 0
            and offload_index is None
            and (not os.path.isdir(offload_dir) or not os.path.isfile(os.path.join(offload_dir, "index.json")))
        ):
            disk_state_dict = extract_submodules_state_dict(model.state_dict(), disk_modules)
            offload_state_dict(offload_dir, disk_state_dict)

        execution_device = {
            name: main_device if device in ["cpu", "disk"] else device for name, device in device_map.items()
        }
        execution_device[""] = main_device
        offloaded_devices = ["disk"] if main_device == "cpu" or main_device == "mps" else ["cpu", "disk"]
        offload = {name: device in offloaded_devices for name, device in device_map.items()}
        save_folder = offload_dir if len(disk_modules) > 0 else None
        if state_dict is not None or save_folder is not None or offload_index is not None:
            device = main_device if offload_index is not None else None
            weights_map = OffloadedWeightsLoader(
                state_dict=state_dict, save_folder=save_folder, index=offload_index, device=device
            )
        else:
            weights_map = None

        # When dispatching the model's parameters to the devices specified in device_map, we want to avoid allocating memory several times for the
        # tied parameters. The dictionary tied_params_map keeps track of the already allocated data for a given tied parameter (represented by its
        # original pointer) on each device.
        tied_params = find_tied_parameters(model)

        tied_params_map = {}
        for group in tied_params:
            for param_name in group:
                # data_ptr() is enough here, as `find_tied_parameters` finds tied params simply by comparing `param1 is param2`, so we don't need
                # to care about views of tensors through storage_offset.
                data_ptr = recursive_getattr(model, param_name).data_ptr()
                tied_params_map[data_ptr] = {}

                # Note: To handle the disk offloading case, we can not simply use weights_map[param_name].data_ptr() as the reference pointer,
                # as we have no guarantee that safetensors' `file.get_tensor()` will always give the same pointer.
        attach_align_device_hook_on_blocks(
            model,
            execution_device=execution_device,
            offload=offload,
            offload_buffers=offload_buffers,
            weights_map=weights_map,
            skip_keys=skip_keys,
            preload_module_classes=preload_module_classes,
            tied_params_map=tied_params_map,
        )

        # warn if there are any params on the meta device
        offloaded_devices_str = " and ".join(
            [device for device in set(device_map.values()) if device in ("cpu", "disk")]
        )
        if len(offloaded_devices_str) > 0:
            logger.warning(
                f"Some parameters are on the meta device because they were offloaded to the {offloaded_devices_str}."
            )

        # Attaching the hook may break tied weights, so we retie them
        retie_parameters(model, tied_params)

        # add warning to cuda and to method
        def add_warning(fn, model):
            @wraps(fn)
            def wrapper(*args, **kwargs):
                warning_msg = "You shouldn't move a model that is dispatched using accelerate hooks."
                if str(fn.__name__) == "to":
                    to_device = torch._C._nn._parse_to(*args, **kwargs)[0]
                    if to_device is not None:
                        logger.warning(warning_msg)
                else:
                    logger.warning(warning_msg)
                for param in model.parameters():
                    if param.device == torch.device("meta"):
                        raise RuntimeError("You can't move a model that has some modules offloaded to cpu or disk.")
                return fn(*args, **kwargs)

            return wrapper

        # Make sure to update _accelerate_added_attributes in hooks.py if you add any hook
        model.to = add_warning(model.to, model)
        if is_npu_available():
            model.npu = add_warning(model.npu, model)
        elif is_mlu_available():
            model.mlu = add_warning(model.mlu, model)
        elif is_musa_available():
            model.musa = add_warning(model.musa, model)
        elif is_xpu_available():
            model.xpu = add_warning(model.xpu, model)
        else:
            model.cuda = add_warning(model.cuda, model)

        # Check if we are using multi-gpus with RTX 4000 series
        use_multi_gpu = len([device for device in set(device_map.values()) if device not in ("cpu", "disk")]) > 1
        if use_multi_gpu and not check_cuda_p2p_ib_support():
            logger.warning(
                "We've detected an older driver with an RTX 4000 series GPU. These drivers have issues with P2P. "
                "This can affect the multi-gpu inference when using accelerate device_map. "
                "Please make sure to update your driver to the latest version which resolves this."
            )
    else:
        device = list(device_map.values())[0]
        # `torch.Tensor.to(<int num>)` is not supported by `torch_npu` (see this [issue](https://github.com/Ascend/pytorch/issues/16)).
        if is_npu_available() and isinstance(device, int):
            device = f"npu:{device}"
        elif is_mlu_available() and isinstance(device, int):
            device = f"mlu:{device}"
        elif is_musa_available() and isinstance(device, int):
            device = f"musa:{device}"
        elif is_xpu_available() and isinstance(device, int):
            device = f"xpu:{device}"
        if device != "disk":
            model.to(device)
        else:
            raise ValueError(
                "You are trying to offload the whole model to the disk. Please use the `disk_offload` function instead."
) # Convert OrderedDict back to dict for easier usage model.hf_device_map = dict(device_map) return model def load_checkpoint_and_dispatch( model: nn.Module, checkpoint: Union[str, os.PathLike], device_map: Optional[Union[str, Dict[str, Union[int, str, torch.device]]]] = None, max_memory: Optional[Dict[Union[int, str], Union[int, str]]] = None, no_split_module_classes: Optional[List[str]] = None, offload_folder: Optional[Union[str, os.PathLike]] = None, offload_buffers: bool = False, dtype: Optional[Union[str, torch.dtype]] = None, offload_state_dict: Optional[bool] = None, skip_keys: Optional[Union[str, List[str]]] = None, preload_module_classes: Optional[List[str]] = None, force_hooks: bool = False, strict: bool = False, ): """ Loads a (potentially sharded) checkpoint inside a model, potentially sending weights to a given device as they are loaded and adds the various hooks that will make this model run properly (even if split across devices). Args: model (`torch.nn.Module`): The model in which we want to load a checkpoint. checkpoint (`str` or `os.PathLike`): The folder checkpoint to load. It can be: - a path to a file containing a whole model state dict - a path to a `.json` file containing the index to a sharded checkpoint - a path to a folder containing a unique `.index.json` file and the shards of a checkpoint. device_map (`Dict[str, Union[int, str, torch.device]]`, *optional*): A map that specifies where each submodule should go. It doesn't need to be refined to each parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the same device. To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For more information about each option see [here](../concept_guides/big_model_inference#designing-a-device-map). Defaults to None, which means [`dispatch_model`] will not be called. max_memory (`Dict`, *optional*): A dictionary device identifier to maximum memory. Will default to the maximum memory available for each GPU and the available CPU RAM if unset. no_split_module_classes (`List[str]`, *optional*): A list of layer class names that should never be split across device (for instance any layer that has a residual connection). offload_folder (`str` or `os.PathLike`, *optional*): If the `device_map` contains any value `"disk"`, the folder where we will offload weights. offload_buffers (`bool`, *optional*, defaults to `False`): In the layers that are offloaded on the CPU or the hard drive, whether or not to offload the buffers as well as the parameters. dtype (`str` or `torch.dtype`, *optional*): If provided, the weights will be converted to that type when loaded. offload_state_dict (`bool`, *optional*): If `True`, will temporarily offload the CPU state dict on the hard drive to avoid getting out of CPU RAM if the weight of the CPU state dict + the biggest shard does not fit. Will default to `True` if the device map picked contains `"disk"` values. skip_keys (`str` or `List[str]`, *optional*): A list of keys to ignore when moving inputs or outputs between devices. preload_module_classes (`List[str]`, *optional*): A list of classes whose instances should load all their weights (even in the submodules) at the beginning of the forward. 
This should only be used for classes that have submodules which are registered but not called directly during the forward, for instance if a `dense` linear layer is registered, but at forward, `dense.weight` and `dense.bias` are used in some operations instead of calling `dense` directly. force_hooks (`bool`, *optional*, defaults to `False`): Whether or not to force device hooks to be attached to the model even if all layers are dispatched to a single device. strict (`bool`, *optional*, defaults to `False`): Whether to strictly enforce that the keys in the checkpoint state_dict match the keys of the model's state_dict. Example: ```python >>> from accelerate import init_empty_weights, load_checkpoint_and_dispatch >>> from huggingface_hub import hf_hub_download >>> from transformers import AutoConfig, AutoModelForCausalLM >>> # Download the Weights >>> checkpoint = "EleutherAI/gpt-j-6B" >>> weights_location = hf_hub_download(checkpoint, "pytorch_model.bin") >>> # Create a model and initialize it with empty weights >>> config = AutoConfig.from_pretrained(checkpoint) >>> with init_empty_weights(): ... model = AutoModelForCausalLM.from_config(config) >>> # Load the checkpoint and dispatch it to the right devices >>> model = load_checkpoint_and_dispatch( ... model, weights_location, device_map="auto", no_split_module_classes=["GPTJBlock"] ... ) ``` """ if isinstance(device_map, str) and device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or " "'sequential'." ) if isinstance(device_map, str): if device_map != "sequential": max_memory = get_balanced_memory( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, low_zero=(device_map == "balanced_low_0"), ) device_map = infer_auto_device_map( model, max_memory=max_memory, no_split_module_classes=no_split_module_classes, dtype=dtype, offload_buffers=offload_buffers, ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): offload_state_dict = True load_checkpoint_in_model( model, checkpoint, device_map=device_map, offload_folder=offload_folder, dtype=dtype, offload_state_dict=offload_state_dict, offload_buffers=offload_buffers, strict=strict, ) if device_map is None: return model return dispatch_model( model, device_map=device_map, offload_dir=offload_folder, offload_buffers=offload_buffers, skip_keys=skip_keys, preload_module_classes=preload_module_classes, force_hooks=force_hooks, )
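
# Illustrative end-to-end sketch (comments only; the module and checkpoint path are
# assumptions, not part of this library's API surface):
#
#     from accelerate import init_empty_weights, load_checkpoint_and_dispatch
#
#     with init_empty_weights():
#         model = MyModel()  # hypothetical module definition
#     model = load_checkpoint_and_dispatch(model, "path/to/checkpoint", device_map="auto")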
accelerate/src/accelerate/big_modeling.py/0
{ "file_path": "accelerate/src/accelerate/big_modeling.py", "repo_id": "accelerate", "token_count": 11462 }
7
# Copyright 2022 The HuggingFace Team and Brian Chao. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A utility for showing and hiding the terminal cursor on Windows and Linux, based on https://github.com/bchao1/bullet
"""

import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import ctypes
    import msvcrt  # noqa

    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    "Context manager to hide the terminal cursor"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
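
# Illustrative usage (the menu-rendering call below is a hypothetical caller):
#
#     with hide():
#         render_interactive_menu()  # the cursor is restored even if this raises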
accelerate/src/accelerate/commands/menu/cursor.py/0
{ "file_path": "accelerate/src/accelerate/commands/menu/cursor.py", "repo_id": "accelerate", "token_count": 763 }
8
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import warnings import torch from .state import AcceleratorState, GradientState from .utils import DistributedType, honor_type, is_lomo_available, is_torch_xla_available if is_torch_xla_available(): import torch_xla.core.xla_model as xm def move_to_device(state, device): if isinstance(state, (list, tuple)): return honor_type(state, (move_to_device(t, device) for t in state)) elif isinstance(state, dict): return type(state)({k: move_to_device(v, device) for k, v in state.items()}) elif isinstance(state, torch.Tensor): return state.to(device) return state class AcceleratedOptimizer(torch.optim.Optimizer): """ Internal wrapper around a torch optimizer. Conditionally will perform `step` and `zero_grad` if gradients should be synchronized when performing gradient accumulation. Args: optimizer (`torch.optim.optimizer.Optimizer`): The optimizer to wrap. device_placement (`bool`, *optional*, defaults to `True`): Whether or not the optimizer should handle device placement. If so, it will place the state dictionary of `optimizer` on the right device. scaler (`torch.cuda.amp.grad_scaler.GradScaler`, *optional*): The scaler to use in the step function if training with mixed precision. 
""" def __init__(self, optimizer, device_placement=True, scaler=None): self.optimizer = optimizer self.scaler = scaler self.accelerator_state = AcceleratorState() self.gradient_state = GradientState() self.device_placement = device_placement self._is_overflow = False if self.scaler is not None: self._accelerate_step_called = False self._optimizer_original_step_method = self.optimizer.step self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step) # Handle device placement if device_placement: state_dict = self.optimizer.state_dict() if self.accelerator_state.distributed_type == DistributedType.XLA: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) else: state_dict = move_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) @property def state(self): return self.optimizer.state @state.setter def state(self, state): self.optimizer.state = state @property def param_groups(self): return self.optimizer.param_groups @param_groups.setter def param_groups(self, param_groups): self.optimizer.param_groups = param_groups @property def defaults(self): return self.optimizer.defaults @defaults.setter def defaults(self, defaults): self.optimizer.defaults = defaults def add_param_group(self, param_group): self.optimizer.add_param_group(param_group) def load_state_dict(self, state_dict): if self.accelerator_state.distributed_type == DistributedType.XLA and self.device_placement: xm.send_cpu_data_to_device(state_dict, self.accelerator_state.device) self.optimizer.load_state_dict(state_dict) def state_dict(self): return self.optimizer.state_dict() def zero_grad(self, set_to_none=None): if self.gradient_state.sync_gradients: accept_arg = "set_to_none" in inspect.signature(self.optimizer.zero_grad).parameters if accept_arg: if set_to_none is None: set_to_none = True self.optimizer.zero_grad(set_to_none=set_to_none) else: if set_to_none is not None: raise ValueError("`set_to_none` for Optimizer.zero_grad` is not supported by this optimizer.") self.optimizer.zero_grad() def train(self): """ Sets the optimizer to "train" mode. Useful for optimizers like `schedule_free` """ return self.optimizer.train() def eval(self): """ Sets the optimizer to "eval" mode. Useful for optimizers like `schedule_free` """ return self.optimizer.eval() def step(self, closure=None): if is_lomo_available(): from lomo_optim import AdaLomo, Lomo if ( not self.gradient_state.is_xla_gradients_synced and self.accelerator_state.distributed_type == DistributedType.XLA ): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) self.gradient_state.is_xla_gradients_synced = True if is_lomo_available(): # `step` should be a no-op for LOMO optimizers. if isinstance(self.optimizer, (Lomo, AdaLomo)): return if self.gradient_state.sync_gradients: if self.scaler is not None: self.optimizer.step = self._optimizer_patched_step_method self.scaler.step(self.optimizer, closure) self.scaler.update() if not self._accelerate_step_called: # If the optimizer step was skipped, gradient overflow was detected. 
                    self._is_overflow = True
                else:
                    self._is_overflow = False
                # Reset the step method to the original one
                self.optimizer.step = self._optimizer_original_step_method
                # Reset the indicator
                self._accelerate_step_called = False
            else:
                self.optimizer.step(closure)
        if self.accelerator_state.distributed_type == DistributedType.XLA:
            self.gradient_state.is_xla_gradients_synced = False

    def _switch_parameters(self, parameters_map):
        for param_group in self.optimizer.param_groups:
            param_group["params"] = [parameters_map.get(p, p) for p in param_group["params"]]

    @property
    def is_overflow(self):
        """Whether or not the optimizer step was done, or skipped because of gradient overflow."""
        warnings.warn(
            "The `is_overflow` property is deprecated and will be removed in version 1.0 of Accelerate. Use "
            "`optimizer.step_was_skipped` instead.",
            FutureWarning,
        )
        return self._is_overflow

    @property
    def step_was_skipped(self):
        """Whether or not the optimizer step was skipped."""
        return self._is_overflow

    def __getstate__(self):
        _ignored_keys = [
            "_accelerate_step_called",
            "_optimizer_original_step_method",
            "_optimizer_patched_step_method",
        ]
        return {k: v for k, v in self.__dict__.items() if k not in _ignored_keys}

    def __setstate__(self, state):
        self.__dict__.update(state)
        if self.scaler is not None:
            self._accelerate_step_called = False
            self._optimizer_original_step_method = self.optimizer.step
            self._optimizer_patched_step_method = patch_optimizer_step(self, self.optimizer.step)


def patch_optimizer_step(accelerated_optimizer: AcceleratedOptimizer, method):
    def patched_step(*args, **kwargs):
        accelerated_optimizer._accelerate_step_called = True
        return method(*args, **kwargs)

    return patched_step
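
# Illustrative note: this wrapper is normally obtained through `Accelerator.prepare`
# rather than constructed directly, e.g. (sketch):
#
#     optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
#     model, optimizer = accelerator.prepare(model, optimizer)
#     ...
#     optimizer.step()  # dispatches through AcceleratedOptimizer.step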
accelerate/src/accelerate/optimizer.py/0
{ "file_path": "accelerate/src/accelerate/optimizer.py", "repo_id": "accelerate", "token_count": 3387 }
9
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import shutil from pathlib import Path import torch from safetensors.torch import load_file from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy, StateDictType from torch.utils.data import DataLoader from accelerate import Accelerator, FullyShardedDataParallelPlugin from accelerate.commands.merge import merge_command, merge_command_parser from accelerate.state import AcceleratorState from accelerate.test_utils.training import RegressionDataset from accelerate.utils import merge_fsdp_weights, patch_environment, save_fsdp_model logging.basicConfig(level=logging.INFO) parser = merge_command_parser() class TinyModel(torch.nn.Module): def __init__(self): super().__init__() self.linear1 = torch.nn.Linear(16, 16) self.activation = torch.nn.ReLU() self.linear2 = torch.nn.Linear(16, 16) self.softmax = torch.nn.Softmax() def forward(self, x): return self.linear2(self.activation(self.linear1(x))) def setup(): if AcceleratorState._shared_state != {}: AcceleratorState()._reset_state() plugin = FullyShardedDataParallelPlugin( sharding_strategy=ShardingStrategy.FULL_SHARD, state_dict_type=StateDictType.SHARDED_STATE_DICT ) model = TinyModel() with patch_environment(fsdp_auto_wrap_policy="SIZE_BASED_WRAP"): plugin.set_auto_wrap_policy(model) accelerator = Accelerator(fsdp_plugin=plugin) model = accelerator.prepare(model) return model, plugin, accelerator def mock_training(accelerator, model): train_set = RegressionDataset(length=128, seed=42) train_dl = DataLoader(train_set, batch_size=16, shuffle=False) optimizer = torch.optim.SGD(model.parameters(), lr=0.1) train_dl, model, optimizer = accelerator.prepare(train_dl, model, optimizer) for _ in range(3): for batch in train_dl: model.zero_grad() output = model(batch["x"]) loss = torch.nn.functional.mse_loss(output, batch["y"]) accelerator.backward(loss) optimizer.step() return model def check_weights(operation, state_1, state_2): for weight_1, weight_2 in zip(state_1.values(), state_2.values()): if str(weight_1.device) != "cuda": weight_1 = weight_1.to("cuda") if str(weight_2.device) != "cuda": weight_2 = weight_2.to("cuda") if operation == "same": assert torch.allclose(weight_1, weight_2) else: assert not torch.allclose(weight_1, weight_2) def check_safetensors_weights(path, model): safe_state_dict = load_file(path / "model.safetensors") safe_loaded_model = TinyModel() check_weights("diff", model.state_dict(), safe_loaded_model.state_dict()) safe_loaded_model.load_state_dict(safe_state_dict) check_weights("same", model.state_dict(), safe_loaded_model.state_dict()) def check_pytorch_weights(path, model): nonsafe_state_dict = torch.load(path / "pytorch_model.bin") nonsafe_loaded_model = TinyModel() check_weights("diff", model.state_dict(), nonsafe_loaded_model.state_dict()) nonsafe_loaded_model.load_state_dict(nonsafe_state_dict) check_weights("same", model.state_dict(), nonsafe_loaded_model.state_dict()) def 
test_merge_weights_safetensors(model, path): # Should now be saved at `path/merged.safetensors` merge_fsdp_weights(path / "pytorch_model_fsdp_0", path, safe_serialization=True) check_safetensors_weights(path, model) def test_merge_weights_command_safetensors(model, path): args = parser.parse_args([str(path / "pytorch_model_fsdp_0"), str(path)]) merge_command(args) check_safetensors_weights(path, model) def test_merge_weights_pytorch(model, path): # Should now be saved at `path/merged.bin` merge_fsdp_weights(path / "pytorch_model_fsdp_0", path, safe_serialization=False) check_pytorch_weights(path, model) def test_merge_weights_command_pytorch(model, path): args = parser.parse_args([str(path / "pytorch_model_fsdp_0"), str(path), "--unsafe_serialization"]) merge_command(args) check_pytorch_weights(path, model) if __name__ == "__main__": # Note this test requires at least two accelerators! model, plugin, accelerator = setup() if accelerator.num_processes > 1: try: # Initial setup for things out_path = Path("test_merge_weights_fsdp_weights") if not out_path.exists(): out_path.mkdir(parents=True, exist_ok=True) # Train briefly once weights aren't the baseline model = mock_training(accelerator, model) accelerator.wait_for_everyone() gc.collect() # Needed for some lingering refs after training save_fsdp_model(plugin, accelerator, model, out_path) accelerator.wait_for_everyone() # Finally we can test test_merge_weights_safetensors(model, out_path) test_merge_weights_command_safetensors(model, out_path) test_merge_weights_pytorch(model, out_path) test_merge_weights_command_pytorch(model, out_path) except Exception: raise finally: # Cleanup in case of any failures if accelerator.is_main_process: shutil.rmtree(out_path) accelerator.wait_for_everyone() accelerator.end_training()
accelerate/src/accelerate/test_utils/scripts/test_merge_weights.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_merge_weights.py", "repo_id": "accelerate", "token_count": 2351 }
10
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import subprocess import sys import warnings from ast import literal_eval from shutil import which from typing import Any, Dict, List, Tuple import torch from ..commands.config.config_args import SageMakerConfig from ..utils import ( DynamoBackend, PrecisionType, is_fp8_available, is_ipex_available, is_mlu_available, is_musa_available, is_npu_available, is_torch_xla_available, is_xpu_available, ) from ..utils.constants import DEEPSPEED_MULTINODE_LAUNCHERS from ..utils.other import is_port_in_use, merge_dicts from .dataclasses import DistributedType, SageMakerDistributedType def _filter_args(args, parser, default_args=[]): """ Filters out all `accelerate` specific args """ new_args, _ = parser.parse_known_args(default_args) for key, value in vars(args).items(): if key in vars(new_args).keys(): setattr(new_args, key, value) return new_args def _get_mpirun_args(): """ Determines the executable and argument names for mpirun, based on the type of install. The supported MPI programs are: OpenMPI, Intel MPI, or MVAPICH. Returns: Program name and arg names for hostfile, num processes, and processes per node """ # Find the MPI program name mpi_apps = [x for x in ["mpirun", "mpiexec"] if which(x)] if len(mpi_apps) == 0: raise OSError("mpirun or mpiexec were not found. Ensure that Intel MPI, Open MPI, or MVAPICH are installed.") # Call the app with the --version flag to determine which MPI app is installed mpi_app = mpi_apps[0] mpirun_version = subprocess.check_output([mpi_app, "--version"]) if b"Open MPI" in mpirun_version: return mpi_app, "--hostfile", "-n", "--npernode", "--bind-to" else: # Intel MPI and MVAPICH both use the same arg names return mpi_app, "-f", "-n", "-ppn", "" def setup_fp8_env(args: argparse.Namespace, current_env: Dict[str, str]): """ Setup the FP8 environment variables. """ prefix = "ACCELERATE_" for arg in vars(args): if arg.startswith("fp8_"): value = getattr(args, arg) if value is not None: current_env[f"{prefix}{arg.upper()}"] = str(getattr(args, arg)) return current_env def prepare_simple_launcher_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct simple launcher environment variables. 
""" cmd = [] if args.no_python and args.module: raise ValueError("--module and --no_python cannot be used together") if args.mpirun_hostfile is not None: mpi_app_name, hostfile_arg, num_proc_arg, proc_per_node_arg, bind_to_arg = _get_mpirun_args() mpirun_ccl = getattr(args, "mpirun_ccl", None) bind_to = getattr(args, "bind-to", "socket") num_machines = args.num_machines num_processes = getattr(args, "num_processes", None) nproc_per_node = str(num_processes // num_machines) if num_processes and num_machines else "1" cmd += [ mpi_app_name, hostfile_arg, args.mpirun_hostfile, proc_per_node_arg, nproc_per_node, ] if num_processes: cmd += [num_proc_arg, str(num_processes)] if bind_to_arg: cmd += [bind_to_arg, bind_to] if not args.no_python: cmd.append(sys.executable) if args.module: cmd.append("-m") cmd.append(args.training_script) cmd.extend(args.training_script_args) current_env = os.environ.copy() current_env["ACCELERATE_USE_CPU"] = str(args.cpu or args.use_cpu) if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" if args.gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = args.gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = args.gpu_ids elif is_musa_available(): current_env["MUSA_VISIBLE_DEVICES"] = args.gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = args.gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = args.gpu_ids if args.num_machines > 1: current_env["MASTER_ADDR"] = args.main_process_ip current_env["MASTER_PORT"] = str(args.main_process_port) if args.mpirun_hostfile is not None: current_env["CCL_WORKER_COUNT"] = str(mpirun_ccl) elif args.num_processes > 1: current_env["MASTER_ADDR"] = args.main_process_ip if args.main_process_ip is not None else "127.0.0.1" current_env["MASTER_PORT"] = str(args.main_process_port) if args.main_process_port is not None else "29500" try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." ) current_env = setup_fp8_env(args, current_env) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." ) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if is_ipex_available(): current_env["ACCELERATE_USE_IPEX"] = str(args.ipex).lower() current_env["ACCELERATE_USE_XPU"] = str(args.use_xpu).lower() if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return cmd, current_env def prepare_multi_gpu_env(args: argparse.Namespace) -> Dict[str, str]: """ Prepares and returns an environment with the correct multi-GPU environment variables. 
""" num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port if num_machines > 1: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_musa_available(): current_env["MUSA_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids mixed_precision = args.mixed_precision.lower() try: mixed_precision = PrecisionType(mixed_precision) except ValueError: raise ValueError(f"Unknown mixed_precision mode: {mixed_precision}. Choose between {PrecisionType.list()}.") current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision) if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." ) current_env = setup_fp8_env(args, current_env) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." 
) current_env["ACCELERATE_DYNAMO_BACKEND"] = dynamo_backend.value current_env["ACCELERATE_DYNAMO_MODE"] = args.dynamo_mode current_env["ACCELERATE_DYNAMO_USE_FULLGRAPH"] = str(args.dynamo_use_fullgraph) current_env["ACCELERATE_DYNAMO_USE_DYNAMIC"] = str(args.dynamo_use_dynamic) if args.use_fsdp: current_env["ACCELERATE_USE_FSDP"] = "true" if args.fsdp_cpu_ram_efficient_loading and not args.fsdp_sync_module_states: raise ValueError("When using `--fsdp_cpu_ram_efficient_loading` set `--fsdp_sync_module_states` to `True`") current_env["FSDP_SHARDING_STRATEGY"] = str(args.fsdp_sharding_strategy) current_env["FSDP_OFFLOAD_PARAMS"] = str(args.fsdp_offload_params).lower() current_env["FSDP_MIN_NUM_PARAMS"] = str(args.fsdp_min_num_params) if args.fsdp_auto_wrap_policy is not None: current_env["FSDP_AUTO_WRAP_POLICY"] = str(args.fsdp_auto_wrap_policy) if args.fsdp_transformer_layer_cls_to_wrap is not None: current_env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = str(args.fsdp_transformer_layer_cls_to_wrap) if args.fsdp_backward_prefetch_policy is not None: warnings.warn( "`fsdp_backward_prefetch_policy` is deprecated and will be removed in version 0.27.0 of 🤗 Accelerate. Use" " `fsdp_backward_prefetch` instead", FutureWarning, ) args.fsdp_backward_prefetch = args.fsdp_backward_prefetch_policy if args.fsdp_backward_prefetch is not None: current_env["FSDP_BACKWARD_PREFETCH"] = str(args.fsdp_backward_prefetch) if args.fsdp_state_dict_type is not None: current_env["FSDP_STATE_DICT_TYPE"] = str(args.fsdp_state_dict_type) current_env["FSDP_FORWARD_PREFETCH"] = str(args.fsdp_forward_prefetch).lower() current_env["FSDP_USE_ORIG_PARAMS"] = str(args.fsdp_use_orig_params).lower() current_env["FSDP_CPU_RAM_EFFICIENT_LOADING"] = str(args.fsdp_cpu_ram_efficient_loading).lower() current_env["FSDP_SYNC_MODULE_STATES"] = str(args.fsdp_sync_module_states).lower() current_env["FSDP_ACTIVATION_CHECKPOINTING"] = str(args.fsdp_activation_checkpointing).lower() if args.use_megatron_lm: prefix = "MEGATRON_LM_" current_env["ACCELERATE_USE_MEGATRON_LM"] = "true" current_env[prefix + "TP_DEGREE"] = str(args.megatron_lm_tp_degree) current_env[prefix + "PP_DEGREE"] = str(args.megatron_lm_pp_degree) current_env[prefix + "GRADIENT_CLIPPING"] = str(args.megatron_lm_gradient_clipping) if args.megatron_lm_num_micro_batches is not None: current_env[prefix + "NUM_MICRO_BATCHES"] = str(args.megatron_lm_num_micro_batches) if args.megatron_lm_sequence_parallelism is not None: current_env[prefix + "SEQUENCE_PARALLELISM"] = str(args.megatron_lm_sequence_parallelism) if args.megatron_lm_recompute_activations is not None: current_env[prefix + "RECOMPUTE_ACTIVATIONS"] = str(args.megatron_lm_recompute_activations) if args.megatron_lm_use_distributed_optimizer is not None: current_env[prefix + "USE_DISTRIBUTED_OPTIMIZER"] = str(args.megatron_lm_use_distributed_optimizer) current_env["OMP_NUM_THREADS"] = str(args.num_cpu_threads_per_process) if args.enable_cpu_affinity: current_env["ACCELERATE_CPU_AFFINITY"] = "1" return current_env def prepare_deepspeed_cmd_env(args: argparse.Namespace) -> Tuple[List[str], Dict[str, str]]: """ Prepares and returns the command list and an environment with the correct DeepSpeed environment variables. 
""" num_processes = args.num_processes num_machines = args.num_machines main_process_ip = args.main_process_ip main_process_port = args.main_process_port cmd = None # make sure launcher is not None if args.deepspeed_multinode_launcher is None: # set to default pdsh args.deepspeed_multinode_launcher = DEEPSPEED_MULTINODE_LAUNCHERS[0] if num_machines > 1 and args.deepspeed_multinode_launcher != DEEPSPEED_MULTINODE_LAUNCHERS[1]: cmd = ["deepspeed", "--no_local_rank"] cmd.extend(["--hostfile", str(args.deepspeed_hostfile), "--launcher", str(args.deepspeed_multinode_launcher)]) if args.deepspeed_exclusion_filter is not None: cmd.extend( [ "--exclude", str(args.deepspeed_exclusion_filter), ] ) elif args.deepspeed_inclusion_filter is not None: cmd.extend( [ "--include", str(args.deepspeed_inclusion_filter), ] ) else: cmd.extend(["--num_gpus", str(args.num_processes // args.num_machines)]) if main_process_ip: cmd.extend(["--master_addr", str(main_process_ip)]) cmd.extend(["--master_port", str(main_process_port)]) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: cmd.append("--module") elif args.no_python: cmd.append("--no_python") cmd.append(args.training_script) cmd.extend(args.training_script_args) elif num_machines > 1 and args.deepspeed_multinode_launcher == DEEPSPEED_MULTINODE_LAUNCHERS[1]: args.nproc_per_node = str(num_processes // num_machines) args.nnodes = str(num_machines) args.node_rank = int(args.machine_rank) if getattr(args, "same_network", False): args.master_addr = str(main_process_ip) args.master_port = str(main_process_port) else: args.rdzv_endpoint = f"{main_process_ip}:{main_process_port}" else: args.nproc_per_node = str(num_processes) if main_process_port is not None: args.master_port = str(main_process_port) if main_process_port is None: main_process_port = 29500 # only need to check port availability in main process, in case we have to start multiple launchers on the same machine # for some reasons like splitting log files. need_port_check = num_machines <= 1 or int(args.machine_rank) == 0 if need_port_check and is_port_in_use(main_process_port): raise ConnectionError( f"Tried to launch distributed communication on port `{main_process_port}`, but another process is utilizing it. " "Please specify a different port (such as using the `--main_process_port` flag or specifying a different `main_process_port` in your config file)" " and rerun your script. To automatically use the next open port (on a single node), you can set this to `0`." ) if args.module and args.no_python: raise ValueError("--module and --no_python cannot be used together") elif args.module: args.module = True elif args.no_python: args.no_python = True current_env = os.environ.copy() if args.debug: current_env["ACCELERATE_DEBUG_MODE"] = "true" gpu_ids = getattr(args, "gpu_ids", "all") if gpu_ids != "all" and args.gpu_ids is not None: if is_xpu_available(): current_env["ZE_AFFINITY_MASK"] = gpu_ids elif is_mlu_available(): current_env["MLU_VISIBLE_DEVICES"] = gpu_ids elif is_musa_available(): current_env["MUSA_VISIBLE_DEVICES"] = gpu_ids elif is_npu_available(): current_env["ASCEND_RT_VISIBLE_DEVICES"] = gpu_ids else: current_env["CUDA_VISIBLE_DEVICES"] = gpu_ids try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." 
    )
    current_env["PYTHONPATH"] = env_var_path_add("PYTHONPATH", os.path.abspath("."))
    current_env["ACCELERATE_MIXED_PRECISION"] = str(mixed_precision)
    if args.mixed_precision.lower() == "fp8":
        if not is_fp8_available():
            raise RuntimeError(
                "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed."
            )
        current_env = setup_fp8_env(args, current_env)
    current_env["ACCELERATE_CONFIG_DS_FIELDS"] = str(args.deepspeed_fields_from_accelerate_config).lower()
    current_env["ACCELERATE_USE_DEEPSPEED"] = "true"
    if args.zero_stage is not None:
        current_env["ACCELERATE_DEEPSPEED_ZERO_STAGE"] = str(args.zero_stage)
    if args.gradient_accumulation_steps is not None:
        current_env["ACCELERATE_GRADIENT_ACCUMULATION_STEPS"] = str(args.gradient_accumulation_steps)
    if args.gradient_clipping is not None:
        current_env["ACCELERATE_GRADIENT_CLIPPING"] = str(args.gradient_clipping).lower()
    if args.offload_optimizer_device is not None:
        current_env["ACCELERATE_DEEPSPEED_OFFLOAD_OPTIMIZER_DEVICE"] = str(args.offload_optimizer_device).lower()
    if args.offload_param_device is not None:
        current_env["ACCELERATE_DEEPSPEED_OFFLOAD_PARAM_DEVICE"] = str(args.offload_param_device).lower()
    if args.zero3_init_flag is not None:
        current_env["ACCELERATE_DEEPSPEED_ZERO3_INIT"] = str(args.zero3_init_flag).lower()
    if args.zero3_save_16bit_model is not None:
        current_env["ACCELERATE_DEEPSPEED_ZERO3_SAVE_16BIT_MODEL"] = str(args.zero3_save_16bit_model).lower()
    if args.deepspeed_config_file is not None:
        current_env["ACCELERATE_DEEPSPEED_CONFIG_FILE"] = str(args.deepspeed_config_file)
    if args.enable_cpu_affinity:
        current_env["ACCELERATE_CPU_AFFINITY"] = "1"
    if args.deepspeed_moe_layer_cls_names is not None:
        current_env["ACCELERATE_DEEPSPEED_MOE_LAYER_CLS_NAMES"] = str(args.deepspeed_moe_layer_cls_names)
    return cmd, current_env


def prepare_tpu(
    args: argparse.Namespace, current_env: Dict[str, str], pod: bool = False
) -> Tuple[argparse.Namespace, Dict[str, str]]:
    """
    Prepares and returns an environment with the correct TPU environment variables.
    """
    if args.mixed_precision == "bf16" and is_torch_xla_available(check_is_tpu=True):
        if args.downcast_bf16:
            current_env["XLA_DOWNCAST_BF16"] = "1"
        else:
            current_env["XLA_USE_BF16"] = "1"
    if args.debug:
        current_env["ACCELERATE_DEBUG_MODE"] = "true"
    if pod:
        # Take explicit args and set them up for XLA
        args.vm = args.tpu_vm
        args.tpu = args.tpu_name
    return args, current_env


def _convert_nargs_to_dict(nargs: List[str]) -> Dict[str, str]:
    if len(nargs) == 0:
        return {}

    # helper function to infer type for argparse
    def _infer_type(s):
        try:
            s = float(s)
            if s // 1 == s:
                return int(s)
            return s
        except ValueError:
            return s

    parser = argparse.ArgumentParser()
    _, unknown = parser.parse_known_args(nargs)
    for index, argument in enumerate(unknown):
        if argument.startswith(("-", "--")):
            action = None
            if index + 1 < len(unknown):  # checks if next index would be in list
                if unknown[index + 1].startswith(("-", "--")):  # checks if next element is a key
                    # raise an error if element is store_true or store_false
                    raise ValueError(
                        "SageMaker doesn’t support argparse actions for `store_true` or `store_false`. Please define explicit types"
                    )
            else:  # raise an error if last element is store_true or store_false
                raise ValueError(
                    "SageMaker doesn’t support argparse actions for `store_true` or `store_false`.
Please define explicit types" ) # adds argument to parser based on action_store true if action is None: parser.add_argument(argument, type=_infer_type) else: parser.add_argument(argument, action=action) return { key: (literal_eval(value) if value in ("True", "False") else value) for key, value in parser.parse_args(nargs).__dict__.items() } def prepare_sagemager_args_inputs( sagemaker_config: SageMakerConfig, args: argparse.Namespace ) -> Tuple[argparse.Namespace, Dict[str, Any]]: # configure environment print("Configuring Amazon SageMaker environment") os.environ["AWS_DEFAULT_REGION"] = sagemaker_config.region # configure credentials if sagemaker_config.profile is not None: os.environ["AWS_PROFILE"] = sagemaker_config.profile elif args.aws_access_key_id is not None and args.aws_secret_access_key is not None: os.environ["AWS_ACCESS_KEY_ID"] = args.aws_access_key_id os.environ["AWS_SECRET_ACCESS_KEY"] = args.aws_secret_access_key else: raise OSError("You need to provide an aws_access_key_id and aws_secret_access_key when not using aws_profile") # extract needed arguments source_dir = os.path.dirname(args.training_script) if not source_dir: # checks if string is empty source_dir = "." entry_point = os.path.basename(args.training_script) if not entry_point.endswith(".py"): raise ValueError(f'Your training script should be a python script and not "{entry_point}"') print("Converting Arguments to Hyperparameters") hyperparameters = _convert_nargs_to_dict(args.training_script_args) try: mixed_precision = PrecisionType(args.mixed_precision.lower()) except ValueError: raise ValueError( f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}." ) try: dynamo_backend = DynamoBackend(args.dynamo_backend.upper()) except ValueError: raise ValueError( f"Unknown dynamo backend: {args.dynamo_backend.upper()}. Choose between {DynamoBackend.list()}." ) # Environment variables to be set for use during training job environment = { "ACCELERATE_USE_SAGEMAKER": "true", "ACCELERATE_MIXED_PRECISION": str(mixed_precision), "ACCELERATE_DYNAMO_BACKEND": dynamo_backend.value, "ACCELERATE_DYNAMO_MODE": args.dynamo_mode, "ACCELERATE_DYNAMO_USE_FULLGRAPH": str(args.dynamo_use_fullgraph), "ACCELERATE_DYNAMO_USE_DYNAMIC": str(args.dynamo_use_dynamic), "ACCELERATE_SAGEMAKER_DISTRIBUTED_TYPE": sagemaker_config.distributed_type.value, } if args.mixed_precision.lower() == "fp8": if not is_fp8_available(): raise RuntimeError( "FP8 is not available on this machine. Please ensure that either Transformer Engine or MSAMP is installed." 
            )
        environment = setup_fp8_env(args, environment)
    # configure distribution setup
    distribution = None
    if sagemaker_config.distributed_type == SageMakerDistributedType.DATA_PARALLEL:
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}}

    # configure sagemaker inputs
    sagemaker_inputs = None
    if sagemaker_config.sagemaker_inputs_file is not None:
        print(f"Loading SageMaker Inputs from {sagemaker_config.sagemaker_inputs_file} file")
        sagemaker_inputs = {}
        with open(sagemaker_config.sagemaker_inputs_file) as file:
            for i, line in enumerate(file):
                if i == 0:
                    continue
                l = line.split("\t")
                sagemaker_inputs[l[0]] = l[1].strip()
        print(f"Loaded SageMaker Inputs: {sagemaker_inputs}")

    # configure sagemaker metrics
    sagemaker_metrics = None
    if sagemaker_config.sagemaker_metrics_file is not None:
        print(f"Loading SageMaker Metrics from {sagemaker_config.sagemaker_metrics_file} file")
        sagemaker_metrics = []
        with open(sagemaker_config.sagemaker_metrics_file) as file:
            for i, line in enumerate(file):
                if i == 0:
                    continue
                l = line.split("\t")
                metric_dict = {
                    "Name": l[0],
                    "Regex": l[1].strip(),
                }
                sagemaker_metrics.append(metric_dict)
        print(f"Loaded SageMaker Metrics: {sagemaker_metrics}")

    # configure session
    print("Creating Estimator")
    args = {
        "image_uri": sagemaker_config.image_uri,
        "entry_point": entry_point,
        "source_dir": source_dir,
        "role": sagemaker_config.iam_role_name,
        "transformers_version": sagemaker_config.transformers_version,
        "pytorch_version": sagemaker_config.pytorch_version,
        "py_version": sagemaker_config.py_version,
        "base_job_name": sagemaker_config.base_job_name,
        "instance_count": sagemaker_config.num_machines,
        "instance_type": sagemaker_config.ec2_instance_type,
        "debugger_hook_config": False,
        "distribution": distribution,
        "hyperparameters": hyperparameters,
        "environment": environment,
        "metric_definitions": sagemaker_metrics,
    }

    if sagemaker_config.additional_args is not None:
        args = merge_dicts(sagemaker_config.additional_args, args)
    return args, sagemaker_inputs


def env_var_path_add(env_var_name, path_to_add):
    """
    Extends a path-based environment variable's value with a new path and returns the updated value. It's up to the
    caller to set it in os.environ.
    """
    paths = [p for p in os.environ.get(env_var_name, "").split(":") if len(p) > 0]
    paths.append(str(path_to_add))
    return ":".join(paths)


class PrepareForLaunch:
    """
    Prepare a function that will be launched in a distributed setup.

    Args:
        launcher (`Callable`):
            The function to launch.
        distributed_type ([`~state.DistributedType`]):
            The distributed type to prepare for.
        debug (`bool`, *optional*, defaults to `False`):
            Whether or not this is a debug launch.
""" def __init__(self, launcher, distributed_type="NO", debug=False): self.launcher = launcher self.distributed_type = DistributedType(distributed_type) self.debug = debug def __call__(self, index, *args): if self.debug: world_size = int(os.environ.get("WORLD_SIZE")) rdv_file = os.environ.get("ACCELERATE_DEBUG_RDV_FILE") torch.distributed.init_process_group( "gloo", rank=index, store=torch.distributed.FileStore(rdv_file, world_size), world_size=world_size, ) elif self.distributed_type in ( DistributedType.MULTI_GPU, DistributedType.MULTI_MLU, DistributedType.MULTI_MUSA, DistributedType.MULTI_NPU, DistributedType.MULTI_XPU, DistributedType.MULTI_CPU, ): # Prepare the environment for torch.distributed os.environ["LOCAL_RANK"] = str(index) nproc = int(os.environ.get("NPROC", 1)) node_rank = int(os.environ.get("NODE_RANK", 0)) os.environ["RANK"] = str(nproc * node_rank + index) os.environ["FORK_LAUNCHED"] = str(1) self.launcher(*args)
accelerate/src/accelerate/utils/launch.py/0
{ "file_path": "accelerate/src/accelerate/utils/launch.py", "repo_id": "accelerate", "token_count": 13048 }
11
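The port-availability guard in launch.py above calls `is_port_in_use`, which is defined earlier in the file and not shown in this excerpt. A minimal sketch of such a check, offered as an assumption rather than the verbatim source, attempts a TCP connection and treats success as "port taken":

import socket


def is_port_in_use(port: int = 29500) -> bool:
    # connect_ex returns 0 exactly when something is already listening on
    # the port, which is the situation the launcher must error out on.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0

Binding a socket instead of connecting is an equally common variant; either way, as the code above shows, the launcher only runs this check on the main process (machine_rank 0).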
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import functools import os import torch from transformers import AutoModel from transformers.testing_utils import mockenv_context from transformers.trainer_utils import set_seed from accelerate.accelerator import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils.testing import ( AccelerateTestCase, TempDirTestCase, execute_subprocess_async, get_launch_command, path_in_accelerate_package, require_fsdp, require_multi_device, require_non_cpu, require_non_torch_xla, slow, ) from accelerate.utils.constants import ( FSDP_AUTO_WRAP_POLICY, FSDP_BACKWARD_PREFETCH, FSDP_SHARDING_STRATEGY, FSDP_STATE_DICT_TYPE, ) from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin from accelerate.utils.fsdp_utils import disable_fsdp_ram_efficient_loading, enable_fsdp_ram_efficient_loading from accelerate.utils.other import patch_environment set_seed(42) BERT_BASE_CASED = "bert-base-cased" FP16 = "fp16" BF16 = "bf16" dtypes = [FP16, BF16] @require_fsdp @require_non_cpu @require_non_torch_xla class FSDPPluginIntegration(AccelerateTestCase): def setUp(self): super().setUp() self.dist_env = dict( MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1", ) self.fsdp_env = dict(ACCELERATE_USE_FSDP="true", **self.dist_env) def test_sharding_strategy(self): from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy # check that giving enums works fine for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): env = self.fsdp_env.copy() env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=ShardingStrategy(i + 1)) assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) # check that giving names works fine for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): env = self.fsdp_env.copy() env["FSDP_SHARDING_STRATEGY"] = strategy with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) fsdp_plugin = FullyShardedDataParallelPlugin(sharding_strategy=strategy) assert fsdp_plugin.sharding_strategy == ShardingStrategy(i + 1) def test_backward_prefetch(self): from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH): expected_value = None if prefetch_policy == "NO_PREFETCH" else BackwardPrefetch(i + 1) env = self.fsdp_env.copy() env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert ( fsdp_plugin.backward_prefetch == expected_value ), f"Actual: {fsdp_plugin.backward_prefetch} != Expected: {expected_value}" # Check if torch enum works if prefetch_policy != "NO_PREFETCH": fsdp_plugin = 
FullyShardedDataParallelPlugin(backward_prefetch=BackwardPrefetch(i + 1)) assert fsdp_plugin.backward_prefetch == expected_value # Check if name works fsdp_plugin = FullyShardedDataParallelPlugin(backward_prefetch=prefetch_policy) assert fsdp_plugin.backward_prefetch == expected_value def test_state_dict_type(self): from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE): env = self.fsdp_env.copy() env["FSDP_STATE_DICT_TYPE"] = state_dict_type with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.state_dict_type == StateDictType(i + 1) if state_dict_type == "FULL_STATE_DICT": assert fsdp_plugin.state_dict_config.offload_to_cpu assert fsdp_plugin.state_dict_config.rank0_only fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_type=StateDictType(i + 1)) assert fsdp_plugin.state_dict_type == StateDictType(i + 1) if state_dict_type == "FULL_STATE_DICT": assert fsdp_plugin.state_dict_config.offload_to_cpu assert fsdp_plugin.state_dict_config.rank0_only def test_auto_wrap_policy(self): model = AutoModel.from_pretrained(BERT_BASE_CASED) for policy in FSDP_AUTO_WRAP_POLICY: env = self.fsdp_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = policy transformer_cls_to_wrap = None min_num_params = None if policy == "TRANSFORMER_BASED_WRAP": env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer" transformer_cls_to_wrap = "BertLayer" elif policy == "SIZE_BASED_WRAP": env["FSDP_MIN_NUM_PARAMS"] = "2000" min_num_params = 2000 # First test via env with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(model) if policy == "NO_WRAP": assert fsdp_plugin.auto_wrap_policy is None else: assert isinstance(fsdp_plugin.auto_wrap_policy, functools.partial) # Then manually set the policy fsdp_plugin = FullyShardedDataParallelPlugin( auto_wrap_policy=policy, transformer_cls_names_to_wrap=transformer_cls_to_wrap, min_num_params=min_num_params, ) fsdp_plugin.set_auto_wrap_policy(model) if policy == "NO_WRAP": assert fsdp_plugin.auto_wrap_policy is None else: assert isinstance(fsdp_plugin.auto_wrap_policy, functools.partial) env = self.fsdp_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP" env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() with self.assertRaises(Exception) as cm: fsdp_plugin.set_auto_wrap_policy(model) assert "Could not find the transformer layer class T5Layer in the model." in str(cm.exception) fsdp_plugin = FullyShardedDataParallelPlugin( auto_wrap_policy="TRANSFORMER_BASED_WRAP", transformer_cls_names_to_wrap="T5Layer", ) with self.assertRaises(Exception) as cm: fsdp_plugin.set_auto_wrap_policy(model) assert "Could not find the transformer layer class T5Layer in the model." 
in str(cm.exception) env = self.fsdp_env.copy() env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP" env["FSDP_MIN_NUM_PARAMS"] = "0" with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() fsdp_plugin.set_auto_wrap_policy(model) assert fsdp_plugin.auto_wrap_policy is None fsdp_plugin = FullyShardedDataParallelPlugin( auto_wrap_policy="SIZE_BASED_WRAP", min_num_params=0, ) fsdp_plugin.set_auto_wrap_policy(model) assert fsdp_plugin.auto_wrap_policy is None def test_mixed_precision(self): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: env = self.fsdp_env.copy() env["ACCELERATE_MIXED_PRECISION"] = mp_dtype with mockenv_context(**env): accelerator = Accelerator() if mp_dtype == "fp16": dtype = torch.float16 elif mp_dtype == "bf16": dtype = torch.bfloat16 mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy if mp_dtype == FP16: assert isinstance(accelerator.scaler, ShardedGradScaler) elif mp_dtype == BF16: assert accelerator.scaler is None AcceleratorState._reset_state(True) plugin = FullyShardedDataParallelPlugin( mixed_precision_policy={"param_dtype": dtype, "reduce_dtype": dtype, "buffer_dtype": dtype} ) assert plugin.mixed_precision_policy == mp_policy with mockenv_context(**self.dist_env): accelerator = Accelerator(fsdp_plugin=plugin) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy AcceleratorState._reset_state(True) def test_mixed_precision_buffer_autocast_override(self): from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler for mp_dtype in dtypes: if mp_dtype == "fp16": dtype = torch.float16 elif mp_dtype == "bf16": dtype = torch.bfloat16 mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=torch.float32) env = self.fsdp_env.copy() env["ACCELERATE_MIXED_PRECISION"] = mp_dtype with mockenv_context(**env): accelerator = Accelerator() accelerator.state.fsdp_plugin.set_mixed_precision(dtype, buffer_autocast=True, override=True) assert accelerator.state.fsdp_plugin.mixed_precision_policy == mp_policy if mp_dtype == FP16: assert isinstance(accelerator.scaler, ShardedGradScaler) elif mp_dtype == BF16: assert accelerator.scaler is None AcceleratorState._reset_state(True) def test_cpu_offload(self): from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload for flag in [True, False]: env = self.fsdp_env.copy() env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower() with mockenv_context(**env): fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_offload == CPUOffload(offload_params=flag) fsdp_plugin = FullyShardedDataParallelPlugin(cpu_offload=flag) assert fsdp_plugin.cpu_offload == CPUOffload(offload_params=flag) def test_cpu_ram_efficient_loading(self): enable_fsdp_ram_efficient_loading() fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_ram_efficient_loading is True assert os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING") == "True" disable_fsdp_ram_efficient_loading() fsdp_plugin = FullyShardedDataParallelPlugin() assert fsdp_plugin.cpu_ram_efficient_loading is False assert os.environ.get("FSDP_CPU_RAM_EFFICIENT_LOADING") == "False" # Skip this test when TorchXLA is available because accelerate.launch does not support TorchXLA FSDP. 
@require_non_torch_xla @require_fsdp @require_multi_device @slow class FSDPIntegrationTest(TempDirTestCase): test_scripts_folder = path_in_accelerate_package("test_utils", "scripts", "external_deps") def setUp(self): super().setUp() self.performance_lower_bound = 0.82 self.performance_configs = [ "fsdp_shard_grad_op_transformer_based_wrap", "fsdp_full_shard_transformer_based_wrap", ] self.peak_memory_usage_upper_bound = { "multi_gpu_fp16": 3200, "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000, "fsdp_full_shard_transformer_based_wrap_fp16": 1900, # Disabling below test as it overwhelms the RAM memory usage # on CI self-hosted runner leading to tests getting killed. # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500, # fp16 was leading to indefinite hang } self.n_train = 160 self.n_val = 160 def test_performance(self): self.test_file_path = self.test_scripts_folder / "test_performance.py" cmd = get_launch_command(num_processes=2, num_machines=1, machine_rank=0, use_fsdp=True) for config in self.performance_configs: cmd_config = cmd.copy() for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): if strategy.lower() in config: cmd_config.append(f"--fsdp_sharding_strategy={strategy}") break if "fp32" in config: cmd_config.append("--mixed_precision=no") else: cmd_config.append("--mixed_precision=fp16") if "cpu_offload" in config: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in config: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--performance_lower_bound={self.performance_lower_bound}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) def test_checkpointing(self): self.test_file_path = self.test_scripts_folder / "test_checkpointing.py" cmd = get_launch_command( num_processes=2, num_machines=1, machine_rank=0, use_fsdp=True, mixed_precision="fp16", fsdp_transformer_layer_cls_to_wrap="BertLayer", ) for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): cmd_config = cmd.copy() cmd_config.append(f"--fsdp_sharding_strategy={strategy}") if strategy != "FULL_SHARD": continue state_dict_config_index = len(cmd_config) for state_dict_type in FSDP_STATE_DICT_TYPE: # Todo: Currently failing for `LOCAL_STATE_DICT` with error # Unexpected key(s) in state_dict: "_fsdp_wrapped_module._flat_param". 
if state_dict_type == "LOCAL_STATE_DICT": continue cmd_config = cmd_config[:state_dict_config_index] cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", "--partial_train_epoch=1", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) cmd_config = cmd_config[:-1] resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0") cmd_config.extend( [ f"--resume_from_checkpoint={resume_from_checkpoint}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config) def test_peak_memory_usage(self): self.test_file_path = self.test_scripts_folder / "test_peak_memory_usage.py" cmd = get_launch_command(num_processes=2, num_machines=1, machine_rank=0) for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items(): cmd_config = cmd.copy() if "fp16" in spec: cmd_config.extend(["--mixed_precision=fp16"]) else: cmd_config.extend(["--mixed_precision=no"]) if "multi_gpu" in spec: continue else: cmd_config.extend(["--use_fsdp"]) for i, strategy in enumerate(FSDP_SHARDING_STRATEGY): if strategy.lower() in spec: cmd_config.append(f"--fsdp_sharding_strategy={strategy}") break if "cpu_offload" in spec: cmd_config.append("--fsdp_offload_params=True") for policy in FSDP_AUTO_WRAP_POLICY: if policy.lower() in spec: cmd_config.append(f"--fsdp_auto_wrap_policy={policy}") break if policy == "TRANSFORMER_BASED_WRAP": cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer") elif policy == "SIZE_BASED_WRAP": cmd_config.append("--fsdp_min_num_params=2000") cmd_config.extend( [ self.test_file_path, f"--output_dir={self.tmpdir}", f"--peak_memory_upper_bound={peak_mem_upper_bound}", f"--n_train={self.n_train}", f"--n_val={self.n_val}", ] ) with patch_environment(omp_num_threads=1): execute_subprocess_async(cmd_config)
accelerate/tests/fsdp/test_fsdp.py/0
{ "file_path": "accelerate/tests/fsdp/test_fsdp.py", "repo_id": "accelerate", "token_count": 9117 }
12
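The FSDP tests above all exercise the same mechanism: `FullyShardedDataParallelPlugin()` reads `FSDP_*` environment variables at construction time. A minimal sketch of that path, assuming a machine where FSDP is usable (the tests gate on `require_non_cpu`) and with illustrative values:

import os

from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin

os.environ["ACCELERATE_USE_FSDP"] = "true"
os.environ["FSDP_SHARDING_STRATEGY"] = "FULL_SHARD"  # names and 1-based indices are both accepted
os.environ["FSDP_BACKWARD_PREFETCH"] = "BACKWARD_PRE"
os.environ["FSDP_OFFLOAD_PARAMS"] = "false"

fsdp_plugin = FullyShardedDataParallelPlugin()
print(fsdp_plugin.sharding_strategy, fsdp_plugin.backward_prefetch, fsdp_plugin.cpu_offload)

The same settings can also be passed as constructor arguments, which the tests exercise side by side with the env-driven path.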
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import torch import torch.nn as nn from torch.fx import symbolic_trace from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_device, torch_device torch_device = f"{torch_device}:0" if torch_device != "cpu" else "cpu" class ModelForTest(nn.Module): def __init__(self): super().__init__() self.linear1 = nn.Linear(3, 4) self.batchnorm = nn.BatchNorm1d(4) self.linear2 = nn.Linear(4, 5) def forward(self, x): return self.linear2(self.batchnorm(self.linear1(x))) class PreForwardHook(ModelHook): def pre_forward(self, module, *args, **kwargs): return (args[0] + 1,) + args[1:], kwargs class PostForwardHook(ModelHook): def post_forward(self, module, output): return output + 1 class HooksModelTester(unittest.TestCase): def test_add_and_remove_hooks(self): test_model = ModelForTest() test_hook = ModelHook() add_hook_to_module(test_model, test_hook) assert test_model._hf_hook == test_hook assert hasattr(test_model, "_old_forward") # Check adding the hook did not change the name or the signature assert test_model.forward.__name__ == "forward" assert list(inspect.signature(test_model.forward).parameters) == ["x"] remove_hook_from_module(test_model) assert not hasattr(test_model, "_hf_hook") assert not hasattr(test_model, "_old_forward") def test_append_and_remove_hooks(self): test_model = ModelForTest() test_hook = ModelHook() add_hook_to_module(test_model, test_hook) add_hook_to_module(test_model, test_hook, append=True) assert isinstance(test_model._hf_hook, SequentialHook) is True assert len(test_model._hf_hook.hooks) == 2 assert hasattr(test_model, "_old_forward") # Check adding the hook did not change the name or the signature assert test_model.forward.__name__ == "forward" assert list(inspect.signature(test_model.forward).parameters) == ["x"] remove_hook_from_module(test_model) assert not hasattr(test_model, "_hf_hook") assert not hasattr(test_model, "_old_forward") def test_pre_forward_hook_is_executed(self): test_model = ModelForTest() x = torch.randn(2, 3) expected = test_model(x + 1) expected2 = test_model(x + 2) test_hook = PreForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, expected, atol=1e-5) # Attaching a hook to a model when it already has one replaces, does not chain test_hook = PreForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, expected, atol=1e-5) # You need to use the sequential hook to chain two or more hooks test_hook = SequentialHook(PreForwardHook(), PreForwardHook()) add_hook_to_module(test_model, test_hook) output2 = test_model(x) assert torch.allclose(output2, expected2, atol=1e-5) def test_post_forward_hook_is_executed(self): test_model = ModelForTest() x = 
torch.randn(2, 3) output = test_model(x) test_hook = PostForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, (output + 1), atol=1e-5) # Attaching a hook to a model when it already has one replaces, does not chain test_hook = PostForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, (output + 1), atol=1e-5) # You need to use the sequential hook to chain two or more hooks test_hook = SequentialHook(PostForwardHook(), PostForwardHook()) add_hook_to_module(test_model, test_hook) output2 = test_model(x) assert torch.allclose(output2, output + 2, atol=1e-5) def test_no_grad_in_hook(self): test_model = ModelForTest() x = torch.randn(2, 3) output = test_model(x) test_hook = PostForwardHook() add_hook_to_module(test_model, test_hook) output1 = test_model(x) assert torch.allclose(output1, (output + 1)) assert output1.requires_grad test_hook.no_grad = True output1 = test_model(x) assert not output1.requires_grad @require_multi_device def test_align_devices_as_model_parallelism(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0)) add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0)) add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1)) assert model.linear1.weight.device == torch.device(torch_device) assert model.batchnorm.weight.device == torch.device(torch_device) assert model.batchnorm.running_mean.device == torch.device(torch_device) assert model.linear2.weight.device == torch.device(torch_device.replace(":0", ":1")) # We can still make a forward pass. The input does not need to be on any particular device x = torch.randn(2, 3) output = model(x) assert output.device == torch.device(torch_device.replace(":0", ":1")) # We can add a general hook to put back output on same device as input. add_hook_to_module(model, AlignDevicesHook(io_same_device=True)) x = torch.randn(2, 3).to(torch_device) output = model(x) assert output.device == torch.device(torch_device) def test_align_devices_as_cpu_offload(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices hook_kwargs = {"execution_device": torch_device, "offload": True} add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs)) # Parameters have been offloaded, so on the meta device assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") # Buffers are not included in the offload by default, so are on the execution device device = torch.device(hook_kwargs["execution_device"]) assert model.batchnorm.running_mean.device == device x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.linear1) remove_hook_from_module(model.batchnorm) remove_hook_from_module(model.linear2) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Now test with buffers included in the offload hook_kwargs = { "execution_device": torch_device, "offload": True, "offload_buffers": True, } add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs)) add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs)) # Parameters have been offloaded, so on the meta device, buffers included assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_module(model.linear1) remove_hook_from_module(model.batchnorm) remove_hook_from_module(model.linear2) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") def test_attach_align_device_hook_as_cpu_offload(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices execution_device = torch_device attach_align_device_hook(model, execution_device=execution_device, offload=True) # Parameters have been offloaded, so on the meta device assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") # Buffers are not included in the offload by default, so are on the execution device device = torch.device(execution_device) assert model.batchnorm.running_mean.device == device x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Now test with buffers included in the offload attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True) # Parameters have been offloaded, so on the meta device, buffers included assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self): model = ModelForTest() # Everything is on CPU assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # This will move each submodule on different devices execution_device = torch_device attach_align_device_hook( model, execution_device=execution_device, offload=True, weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") # Buffers are not included in the offload by default, so are on the execution device device = torch.device(execution_device) assert model.batchnorm.running_mean.device == device x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") # Now test with buffers included in the offload attach_align_device_hook( model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True, ) # Parameters have been offloaded, so on the meta device, buffers included assert model.linear1.weight.device == torch.device("meta") assert model.batchnorm.weight.device == torch.device("meta") assert model.linear2.weight.device == torch.device("meta") assert model.batchnorm.running_mean.device == torch.device("meta") x = torch.randn(2, 3) output = model(x) assert output.device == device # Removing hooks loads back the weights in the model. remove_hook_from_submodules(model) assert model.linear1.weight.device == torch.device("cpu") assert model.batchnorm.weight.device == torch.device("cpu") assert model.linear2.weight.device == torch.device("cpu") def test_add_remove_hook_fx_graph_module(self): with torch.no_grad(): test_model = ModelForTest() test_hook = ModelHook() x = torch.randn(2, 3) output1 = test_model(x) graph_model = symbolic_trace(test_model) output2 = graph_model(x) assert torch.allclose(output1, output2) add_hook_to_module(graph_model, test_hook) remove_hook_from_module(graph_model, recurse=True) # We want to make sure that `add_hook_to_module` and `remove_hook_from_module` yields back an fx.GraphModule # that behaves correctly (for example that is not frozen, see https://github.com/huggingface/accelerate/pull/2369). # For that, we add a sigmoid node to the FX graph and make sure that the new output (output3 below) is different than # the original model's output. 
            linear2_node = None
            for node in graph_model.graph.nodes:
                if node.name == "linear2":
                    linear2_node = node
            assert linear2_node is not None

            graph_model.graph.inserting_after(linear2_node)
            new_node = graph_model.graph.create_node(
                op="call_function", target=torch.sigmoid, args=(linear2_node,), name="sigmoid"
            )

            output_node = None
            for node in graph_model.graph.nodes:
                if node.name == "output":
                    output_node = node
            assert output_node is not None
            output_node.replace_input_with(linear2_node, new_node)

            graph_model.graph.lint()
            graph_model.recompile()

            output3 = graph_model(x)
            # Now the output is expected to be different since we modified the graph.
            assert not torch.allclose(output1, output3)
accelerate/tests/test_hooks.py/0
{ "file_path": "accelerate/tests/test_hooks.py", "repo_id": "accelerate", "token_count": 6577 }
13
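The hook tests above revolve around a small API: subclass `ModelHook`, override `pre_forward` and/or `post_forward`, and attach the hook with `add_hook_to_module`, which swaps the module's `forward` while preserving its name and signature. A minimal sketch (the `TimesTwoInputHook` name is illustrative):

import torch
import torch.nn as nn

from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class TimesTwoInputHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Rescale the first positional input; everything else passes through.
        return (args[0] * 2,) + args[1:], kwargs


layer = nn.Linear(3, 4)
add_hook_to_module(layer, TimesTwoInputHook())
output = layer(torch.randn(2, 3))  # behaves like layer(x * 2) would without the hook
remove_hook_from_module(layer)  # restores the original forward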
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import logging import os import random import shutil import tempfile import unittest import uuid from contextlib import contextmanager import pytest import torch from parameterized import parameterized_class from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import ( DEFAULT_LAUNCH_COMMAND, execute_subprocess_async, require_non_cpu, require_non_torch_xla, ) from accelerate.utils import DistributedType, ProjectConfiguration, set_seed logger = logging.getLogger(__name__) def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2): "Generates a tuple of dummy DataLoaders to test with" def get_dataset(n_batches): x = torch.randn(batch_size * n_batches, 1) return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1)) train_dataset = get_dataset(n_train_batches) valid_dataset = get_dataset(n_valid_batches) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4) valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4) return (train_dataloader, valid_dataloader) def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None): "Trains for `num_epochs`" rands = [] for epoch in range(num_epochs): # Train quickly model.train() for batch in dataloader: x, y = batch outputs = model(x) loss = torch.nn.functional.mse_loss(outputs, y) accelerator.backward(loss) optimizer.step() optimizer.zero_grad() rands.append(random.random()) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class DummyModel(nn.Module): "Simple model to do y=mx+b" def __init__(self): super().__init__() self.a = nn.Parameter(torch.randn(1)) self.b = nn.Parameter(torch.randn(1)) def forward(self, x): return x * self.a + self.b def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = "use_safetensors" if param["use_safetensors"] is True else "use_pytorch" return f"{func.__name__}_{param_based_name}" @parameterized_class(("use_safetensors",), [[True], [False]], class_name_func=parameterized_custom_name_func) class CheckpointTest(unittest.TestCase): def check_adam_state(self, state1, state2, distributed_type): # For DistributedType.XLA, the `accelerator.save_state` function calls `xm._maybe_convert_to_cpu` before saving. # As a result, all tuple values are converted to lists. Therefore, we need to convert them back here. # Remove this code once Torch XLA fixes this issue. 
if distributed_type == DistributedType.XLA: state1["param_groups"][0]["betas"] = tuple(state1["param_groups"][0]["betas"]) state2["param_groups"][0]["betas"] = tuple(state2["param_groups"][0]["betas"]) assert state1 == state2 def test_with_save_limit(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) # Save second state accelerator.save_state(safe_serialization=self.use_safetensors) assert len(os.listdir(accelerator.project_dir)) == 1 def test_can_resume_training_with_folder(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() # Train baseline accelerator = Accelerator() model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial initial = os.path.join(tmpdir, "initial") accelerator.save_state(initial, safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() accelerator = Accelerator() model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state(initial) (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() self.assertEqual(a, a2) self.assertEqual(b, b2) assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything checkpoint = os.path.join(tmpdir, "checkpoint") accelerator.save_state(checkpoint, safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(checkpoint) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_can_resume_training(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial 
accelerator.save_state(safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0")) (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything accelerator.save_state(safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_can_resume_training_checkpoints_relative_path(self): # See #1983 # This test is like test_can_resume_training but uses a relative path for the checkpoint and automatically # infers the checkpoint path when loading. 
@contextmanager def temporary_relative_directory(): # This is equivalent to tempfile.TemporaryDirectory() except that it returns a relative path rand_dir = f"test_path_{uuid.uuid4()}" os.mkdir(rand_dir) try: yield rand_dir finally: shutil.rmtree(rand_dir) with temporary_relative_directory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) # Save initial accelerator.save_state(safe_serialization=self.use_safetensors) (a, b) = model.a.item(), model.b.item() opt_state = optimizer.state_dict() ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator) (a1, b1) = model.a.item(), model.b.item() opt_state1 = optimizer.state_dict() # Train partially set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True) accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader ) accelerator.load_state() # <= infer the directory automatically (a2, b2) = model.a.item(), model.b.item() opt_state2 = optimizer.state_dict() assert a == a2 assert b == b2 self.check_adam_state(opt_state, opt_state2, accelerator.distributed_type) assert opt_state == opt_state2 test_rands = train(2, model, train_dataloader, optimizer, accelerator) # Save everything accelerator.save_state(safe_serialization=self.use_safetensors) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1")) test_rands += train(1, model, train_dataloader, optimizer, accelerator) (a3, b3) = model.a.item(), model.b.item() opt_state3 = optimizer.state_dict() assert a1 == a3 assert b1 == b3 self.check_adam_state(opt_state1, opt_state3, accelerator.distributed_type) assert ground_truth_rands == test_rands def test_invalid_registration(self): t = torch.tensor([1, 2, 3]) t1 = torch.tensor([2, 3, 4]) net = DummyModel() opt = torch.optim.Adam(net.parameters()) accelerator = Accelerator() with self.assertRaises(ValueError) as ve: accelerator.register_for_checkpointing(t, t1, net, opt) message = str(ve.exception) assert "Item at index 0" in message assert "Item at index 1" in message assert "Item at index 2" not in message assert "Item at index 3" not in message def test_with_scheduler(self): with tempfile.TemporaryDirectory() as tmpdir: set_seed(42) model = DummyModel() optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3) scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) train_dataloader, valid_dataloader = dummy_dataloaders() project_config = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline accelerator = Accelerator(project_dir=tmpdir, project_config=project_config) model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) # Save initial 
            accelerator.save_state(safe_serialization=self.use_safetensors)
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            assert scheduler_state != scheduler.state_dict()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            assert scheduler_state == scheduler.state_dict()

    def test_automatic_loading(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state(safe_serialization=self.use_safetensors)
            train(2, model, train_dataloader, optimizer, accelerator, scheduler)
            (a2, b2) = model.a.item(), model.b.item()
            # Save a second time
            accelerator.save_state(safe_serialization=self.use_safetensors)
            train(1, model, train_dataloader, optimizer, accelerator, scheduler)
            (a3, b3) = model.a.item(), model.b.item()

            # Load back in the last saved checkpoint, should point to a2, b2
            accelerator.load_state()
            assert a3 != model.a.item()
            assert b3 != model.b.item()
            assert a2 == model.a.item()
            assert b2 == model.b.item()

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the last two should survive:
            for _ in range(11):
                accelerator.save_state(safe_serialization=self.use_safetensors)
            assert not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            assert os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9"))
            assert os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10"))

    @require_non_cpu
    @require_non_torch_xla
    def test_map_location(self):
        cmd = DEFAULT_LAUNCH_COMMAND + [inspect.getfile(self.__class__)]
        execute_subprocess_async(
            cmd,
            env={
                **os.environ,
                "USE_SAFETENSORS": str(self.use_safetensors),
                "OMP_NUM_THREADS": "1",
            },
        )


if __name__ == "__main__":
    use_safetensors = os.environ.get("USE_SAFETENSORS", "False") == "True"
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state(safe_serialization=use_safetensors)
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
accelerate/tests/test_state_checkpointing.py/0
{ "file_path": "accelerate/tests/test_state_checkpointing.py", "repo_id": "accelerate", "token_count": 8972 }
14
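The tests above all revolve around the same `Accelerator.save_state` / `load_state` pattern with `automatic_checkpoint_naming`. For reference, a minimal sketch of that round-trip outside the test harness — the `nn.Linear` model and the `ckpts` directory are arbitrary stand-ins, not the `DummyModel` / `dummy_dataloaders` helpers from the test file:

```python
import torch
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

# Hypothetical tiny setup; any nn.Module / optimizer works the same way.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
accelerator = Accelerator(project_dir="ckpts", project_config=project_config)
model, optimizer = accelerator.prepare(model, optimizer)

# Each call writes ckpts/checkpoints/checkpoint_{i}; once more than
# `total_limit` checkpoints exist, the oldest ones are pruned.
accelerator.save_state()

# ... train for a while ...

# With automatic naming, load_state() with no argument restores the most
# recent checkpoint; an explicit path can also be passed.
accelerator.load_state()
```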
# Model arguments model_name_or_path: HuggingFaceTB/SmolLM-360M model_revision: main tokenizer_name_or_path: HuggingFaceTB/SmolLM-360M-Instruct # Custom tokenizer with <|im_start|> and <|im_end|> tokens torch_dtype: bfloat16 use_flash_attention_2: true # Data training arguments dataset_mixer: HuggingFaceTB/Magpie-Pro-300K-Filtered-H4: 1.0 HuggingFaceTB/self-oss-instruct-sc2-H4: 1.0 HuggingFaceTB/OpenHermes-2.5-H4: 0.001 HuggingFaceTB/everyday-conversations-llama3.1-2k: 1.0 HuggingFaceTB/instruct-data-basics-smollm-H4: 1.0 dataset_splits: - train_sft - test_sft preprocessing_num_workers: 36 # SFT trainer config bf16: true dataset_kwargs: add_special_tokens: false # We already wrap <bos> and <eos> in the chat template append_concat_token: false # No need to add <eos> across samples do_eval: true evaluation_strategy: epoch gradient_accumulation_steps: 4 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: false hub_model_id: smollm-360M-instruct-new hub_strategy: every_save learning_rate: 1.0e-03 # 3e-4 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 2048 max_steps: -1 num_train_epochs: 1 output_dir: data/smollm-360M-instruct-new overwrite_output_dir: true per_device_eval_batch_size: 4 per_device_train_batch_size: 4 push_to_hub: true remove_unused_columns: true report_to: - tensorboard - wandb save_strategy: "no" seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/smollm/sft/config.yaml/0
{ "file_path": "alignment-handbook/recipes/smollm/sft/config.yaml", "repo_id": "alignment-handbook", "token_count": 589 }
15
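The recipe above is plain YAML, so it can be inspected before launching a run. A small sketch using PyYAML — the file path is an assumption; point it at wherever the recipe lives:

```python
import yaml

with open("recipes/smollm/sft/config.yaml") as f:
    cfg = yaml.safe_load(f)

# dataset_mixer maps dataset names to sampling fractions; note how the
# OpenHermes entry is heavily downweighted (0.001) relative to the rest.
for name, frac in cfg["dataset_mixer"].items():
    print(f"{frac:>6}  {name}")

# Per-device effective batch size (multiply by the number of processes
# for the global batch size): 4 * 4 = 16 with the values above.
print(cfg["per_device_train_batch_size"] * cfg["gradient_accumulation_steps"])
```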
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase

from datasets import Dataset
from transformers import AutoTokenizer

from alignment import apply_chat_template, decontaminate_humaneval


class DecontaminateHumanEvalTest(TestCase):
    """Test we decontaminate HumanEval samples correctly"""

    def setUp(self) -> None:
        # Create a dataset with a HumanEval sample wrapped in some fake text
        dataset = Dataset.from_dict(
            {
                "messages": [
                    [{"content": "Hello", "role": "user"}],
                    [
                        {
                            "content": 'Hello, I am\nfrom\n\n typing import List\n\n\ndef has_close_elements(numbers: List[float], threshold: float) -> bool:\n    """ Check if in given list of numbers, are any two numbers closer to each other than\n    given threshold.\n    >>> has_close_elements([1.0, 2.0, 3.0], 0.5)\n    False\n    >>> has_close_elements([1.0, 2.8, 3.0, 4.0, 5.0, 2.0], 0.3)\n    True\n    """\n',
                            "role": "assistant",
                        }
                    ],
                ]
            }
        )
        tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
        self.dataset = dataset.map(apply_chat_template, fn_kwargs={"tokenizer": tokenizer, "task": "sft"})

    def test_decontamination(self):
        """Test we decontaminate HumanEval samples correctly"""
        decontaminated_dataset = self.dataset.filter(decontaminate_humaneval, batched=True)
        # Check we recover just the first message
        self.assertEqual(decontaminated_dataset[0]["text"], self.dataset[0]["text"])
alignment-handbook/tests/test_decontaminate.py/0
{ "file_path": "alignment-handbook/tests/test_decontaminate.py", "repo_id": "alignment-handbook", "token_count": 926 }
16
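`decontaminate_humaneval` is imported from the `alignment` package, and the test only checks its end-to-end effect. Conceptually, such a filter drops any training sample whose normalized text contains a benchmark solution. A simplified, hypothetical illustration of that idea — not the actual implementation:

```python
def decontaminate(samples: dict, eval_snippets: list[str]) -> list[bool]:
    """Batched datasets.Dataset.filter predicate: keep a sample only if its
    text contains none of the (whitespace-normalized) evaluation snippets."""
    normalized_snippets = [" ".join(s.split()) for s in eval_snippets]
    keep = []
    for text in samples["text"]:
        normalized = " ".join(text.split())
        keep.append(not any(snippet in normalized for snippet in normalized_snippets))
    return keep

# Usage mirroring the test above (humaneval_solutions is assumed):
# clean = dataset.filter(decontaminate, batched=True,
#                        fn_kwargs={"eval_snippets": humaneval_solutions})
```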
[package] name = "candle-book" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } candle = { workspace = true } candle-datasets = { workspace = true } candle-nn = { workspace = true } candle-transformers = { workspace = true } candle-flash-attn = { workspace = true, optional = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } num-traits = { workspace = true } intel-mkl-src = { workspace = true, optional = true } cudarc = { workspace = true, optional = true } half = { workspace = true, optional = true } image = { workspace = true, optional = true } anyhow = { workspace = true } tokio = "1.29.1" [dev-dependencies] byteorder = { workspace = true } hf-hub = { workspace = true, features=["tokio"]} clap = { workspace = true } memmap2 = { workspace = true } rand = { workspace = true } tokenizers = { workspace = true, features = ["onig"] } tracing = { workspace = true } tracing-chrome = { workspace = true } tracing-subscriber = { workspace = true } # Necessary to disambiguate with tokio in wasm examples which are 1.28.1 parquet = { workspace = true } image = { workspace = true } [build-dependencies] anyhow = { workspace = true } [features] default = []
candle/candle-book/Cargo.toml/0
{ "file_path": "candle/candle-book/Cargo.toml", "repo_id": "candle", "token_count": 459 }
17
# Installation

**With Cuda support**:

1. First, make sure that Cuda is correctly installed.
- `nvcc --version` should print information about your Cuda compiler driver.
- `nvidia-smi --query-gpu=compute_cap --format=csv` should print your GPU's compute capability, e.g. something like:

```bash
compute_cap
8.9
```

You can also compile the Cuda kernels for a specific compute cap using the `CUDA_COMPUTE_CAP=<compute cap>` environment variable.

If any of the above commands fails, make sure to update your Cuda version.

2. Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) with Cuda support.

Start by creating a new cargo project:

```bash
cargo new myapp
cd myapp
```

Make sure to add the `candle-core` crate with the cuda feature:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-core --features "cuda"
```

Run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**Without Cuda support**:

Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as follows:

```bash
cargo new myapp
cd myapp
cargo add --git https://github.com/huggingface/candle.git candle-core
```

Finally, run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**With mkl support**:

You can also enable the `mkl` feature, which can speed up inference on CPU. See [Using mkl](./advanced/mkl.md).
candle/candle-book/src/guide/installation.md/0
{ "file_path": "candle/candle-book/src/guide/installation.md", "repo_id": "candle", "token_count": 487 }
18
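Once either setup builds, a short `src/main.rs` can confirm the installation. This is a minimal sketch; `Device::cuda_if_available` simply falls back to the CPU when the `cuda` feature is off or no GPU is visible:

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Uses the first CUDA device if available, otherwise the CPU.
    let device = Device::cuda_if_available(0)?;

    let a = Tensor::randn(0f32, 1.0, (2, 3), &device)?;
    let b = Tensor::randn(0f32, 1.0, (3, 4), &device)?;

    // A (2, 3) x (3, 4) matmul yields a (2, 4) tensor.
    let c = a.matmul(&b)?;
    println!("{c}");
    Ok(())
}
```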
mod benchmarks; use criterion::criterion_main; criterion_main!( benchmarks::affine::benches, benchmarks::matmul::benches, benchmarks::random::benches, benchmarks::where_cond::benches, benchmarks::conv_transpose2d::benches, benchmarks::qmatmul::benches, benchmarks::unary::benches );
candle/candle-core/benches/bench_main.rs/0
{ "file_path": "candle/candle-core/benches/bench_main.rs", "repo_id": "candle", "token_count": 113 }
19
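Each module named in `criterion_main!` exposes a `benches` group built with `criterion_group!`. A minimal sketch of what such a module can look like — the benchmark body here is illustrative, not the actual contents of `benchmarks::matmul` (the real modules also parameterize over devices and dtypes):

```rust
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion};

fn bench_matmul(c: &mut Criterion) {
    let device = Device::Cpu;
    let lhs = Tensor::zeros((256, 256), DType::F32, &device).unwrap();
    let rhs = Tensor::zeros((256, 256), DType::F32, &device).unwrap();
    // Criterion times the closure repeatedly and reports statistics.
    c.bench_function("matmul_256", |b| b.iter(|| lhs.matmul(&rhs).unwrap()));
}

criterion_group!(benches, bench_matmul);
```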
use crate::{op::BackpropOp, op::Op, Error, Result, Tensor}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv1D { pub(crate) b_size: usize, // Maybe we should have a version without l_in as this bit depends on the input and not only on // the weights. pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConv1D { pub(crate) fn l_out(&self) -> usize { (self.l_in + 2 * self.padding - self.dilation * (self.k_size - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose1D { pub(crate) b_size: usize, pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose1D { pub(crate) fn l_out(&self) -> usize { (self.l_in - 1) * self.stride - 2 * self.padding + self.dilation * (self.k_size - 1) + self.output_padding + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum CudnnFwdAlgo { ImplicitGemm, ImplicitPrecompGemm, Gemm, Direct, Fft, FftTiling, Winograd, WinogradNonFused, Count, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, pub cudnn_fwd_algo: Option<CudnnFwdAlgo>, } impl ParamsConv2D { pub(crate) fn out_h(&self) -> usize { (self.i_h + 2 * self.padding - self.dilation * (self.k_h - 1) - 1) / self.stride + 1 } pub(crate) fn out_w(&self) -> usize { (self.i_w + 2 * self.padding - self.dilation * (self.k_w - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose2D { pub(crate) fn out_h(&self) -> usize { (self.i_h - 1) * self.stride + self.dilation * (self.k_h - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_w(&self) -> usize { (self.i_w - 1) * self.stride + self.dilation * (self.k_w - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } impl Tensor { fn conv1d_single_group(&self, kernel: &Self, params: &ParamsConv1D) -> Result<Self> { let storage = self.storage() .conv1d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv1D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D convolution over the input tensor. 
pub fn conv1d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_out, c_in_k, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k * groups { Err(Error::Conv1dInvalidArgs { inp_shape: self.shape().clone(), k_shape: kernel.shape().clone(), padding, stride, msg: "the number of in-channels on the input doesn't match the kernel size", } .bt())? } let params = ParamsConv1D { b_size, l_in, c_out: c_out / groups, c_in: c_in / groups, k_size, padding, stride, dilation, }; if groups == 1 { self.conv1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } fn conv_transpose1d_single_group( &self, kernel: &Self, params: &ParamsConvTranspose1D, ) -> Result<Self> { let storage = self.storage().conv_transpose1d( self.layout(), &kernel.storage(), kernel.layout(), params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose1D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D transposed convolution over the input tensor. pub fn conv_transpose1d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_in_k, c_out, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } if c_in % groups != 0 { crate::bail!("in_channel {c_in} is not divisible by the number of groups") } let params = ParamsConvTranspose1D { b_size, l_in, k_size, c_out, c_in: c_in / groups, padding, output_padding, stride, dilation, }; if groups == 1 { self.conv_transpose1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv_transpose1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } fn conv2d_single_group(&self, kernel: &Self, params: &ParamsConv2D) -> Result<Self> { let storage = self.storage() .conv2d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv2D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 2D convolution over the input tensor. 
pub fn conv2d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_out, c_in_k, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k * groups { crate::bail!( "in_channel mismatch between input ({c_in}, groups {groups}) and kernel ({c_in_k})" ) } let params = ParamsConv2D { b_size, i_h, i_w, k_h, k_w, c_out: c_out / groups, c_in: c_in / groups, padding, stride, dilation, cudnn_fwd_algo: None, }; if groups == 1 { self.conv2d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv2d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } /// Applies a 2D transposed convolution over the input tensor. pub fn conv_transpose2d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_in_k, c_out, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } let params = ParamsConvTranspose2D { b_size, i_h, i_w, k_h, k_w, c_out, c_in, padding, output_padding, stride, dilation, }; let storage = self.storage().conv_transpose2d( self.layout(), &kernel.storage(), kernel.layout(), &params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose2D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } }
candle/candle-core/src/conv.rs/0
{ "file_path": "candle/candle-core/src/conv.rs", "repo_id": "candle", "token_count": 5807 }
20
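The `l_out` formula in `ParamsConv1D` is easy to sanity-check from user code, since `conv1d(kernel, padding, stride, dilation, groups)` maps a `(b, c_in, l_in)` input and a `(c_out, c_in / groups, k)` kernel to `(b, c_out, l_out)`. A short sketch:

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    let dev = Device::Cpu;
    // Input: batch 1, 4 channels, length 10. Kernel: 8 out-channels, size 3.
    let xs = Tensor::randn(0f32, 1.0, (1, 4, 10), &dev)?;
    let kernel = Tensor::randn(0f32, 1.0, (8, 4, 3), &dev)?;

    // padding 1, stride 1, dilation 1, groups 1:
    // l_out = (10 + 2*1 - 1*(3 - 1) - 1) / 1 + 1 = 10
    let ys = xs.conv1d(&kernel, 1, 1, 1, 1)?;
    assert_eq!(ys.dims(), &[1, 8, 10]);
    Ok(())
}
```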
use crate::backend::BackendDevice; use crate::cpu_backend::CpuDevice; use crate::{CpuStorage, DType, Result, Shape, Storage, WithDType}; /// A `DeviceLocation` represents a physical device whereas multiple `Device` /// can live on the same location (typically for cuda devices). #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum DeviceLocation { Cpu, Cuda { gpu_id: usize }, Metal { gpu_id: usize }, } #[derive(Debug, Clone)] pub enum Device { Cpu, Cuda(crate::CudaDevice), Metal(crate::MetalDevice), } pub trait NdArray { fn shape(&self) -> Result<Shape>; fn to_cpu_storage(&self) -> CpuStorage; } impl<S: WithDType> NdArray for S { fn shape(&self) -> Result<Shape> { Ok(Shape::from(())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(&[*self]) } } impl<S: WithDType, const N: usize> NdArray for &[S; N] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self.as_slice()) } } impl<S: WithDType> NdArray for &[S] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self) } } impl<S: WithDType, const N: usize, const M: usize> NdArray for &[[S; N]; M] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((M, N))) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage_owned(self.concat()) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize> NdArray for &[[[S; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3); for i1 in 0..N1 { for i2 in 0..N2 { vec.extend(self[i1][i2]) } } S::to_cpu_storage_owned(vec) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize, const N4: usize> NdArray for &[[[[S; N4]; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3, N4))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3 * N4); for i1 in 0..N1 { for i2 in 0..N2 { for i3 in 0..N3 { vec.extend(self[i1][i2][i3]) } } } S::to_cpu_storage_owned(vec) } } impl<S: NdArray> NdArray for Vec<S> { fn shape(&self) -> Result<Shape> { if self.is_empty() { crate::bail!("empty array") } let shape0 = self[0].shape()?; let n = self.len(); for v in self.iter() { let shape = v.shape()?; if shape != shape0 { crate::bail!("two elements have different shapes {shape:?} {shape0:?}") } } Ok(Shape::from([[n].as_slice(), shape0.dims()].concat())) } fn to_cpu_storage(&self) -> CpuStorage { // This allocates intermediary memory and shouldn't be necessary. 
let storages = self.iter().map(|v| v.to_cpu_storage()).collect::<Vec<_>>(); CpuStorage::concat(storages.as_slice()).unwrap() } } impl Device { pub fn new_cuda(ordinal: usize) -> Result<Self> { Ok(Self::Cuda(crate::CudaDevice::new(ordinal)?)) } pub fn new_metal(ordinal: usize) -> Result<Self> { Ok(Self::Metal(crate::MetalDevice::new(ordinal)?)) } pub fn set_seed(&self, seed: u64) -> Result<()> { match self { Self::Cpu => CpuDevice.set_seed(seed), Self::Cuda(c) => c.set_seed(seed), Self::Metal(m) => m.set_seed(seed), } } pub fn same_device(&self, rhs: &Self) -> bool { match (self, rhs) { (Self::Cpu, Self::Cpu) => true, (Self::Cuda(lhs), Self::Cuda(rhs)) => lhs.same_device(rhs), (Self::Metal(lhs), Self::Metal(rhs)) => lhs.same_device(rhs), _ => false, } } pub fn location(&self) -> DeviceLocation { match self { Self::Cpu => DeviceLocation::Cpu, Self::Cuda(device) => device.location(), Device::Metal(device) => device.location(), } } pub fn is_cpu(&self) -> bool { matches!(self, Self::Cpu) } pub fn is_cuda(&self) -> bool { matches!(self, Self::Cuda(_)) } pub fn is_metal(&self) -> bool { matches!(self, Self::Metal(_)) } pub fn supports_bf16(&self) -> bool { match self { Self::Cuda(_) => true, Self::Metal(_) | Self::Cpu => false, } } /// Return `BF16` for devices that support it, otherwise default to `F32`. pub fn bf16_default_to_f32(&self) -> DType { if self.supports_bf16() { DType::BF16 } else { DType::F32 } } pub fn cuda_if_available(ordinal: usize) -> Result<Self> { if crate::utils::cuda_is_available() { Self::new_cuda(ordinal) } else { Ok(Self::Cpu) } } pub(crate) fn rand_uniform_f64( &self, lo: f64, up: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_uniform(shape, DType::F32, lo, up)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_uniform<T: crate::FloatDType>( &self, lo: T, up: T, shape: &Shape, ) -> Result<Storage> { self.rand_uniform_f64(lo.to_f64(), up.to_f64(), shape, T::DTYPE) } pub(crate) fn rand_normal_f64( &self, mean: f64, std: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. 
if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_normal(shape, DType::F32, mean, std)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_normal<T: crate::FloatDType>( &self, mean: T, std: T, shape: &Shape, ) -> Result<Storage> { self.rand_normal_f64(mean.to_f64(), std.to_f64(), shape, T::DTYPE) } pub(crate) fn ones(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.ones_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn zeros(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.zeros_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) unsafe fn alloc_uninit(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.alloc_uninit(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.alloc_uninit(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.alloc_uninit(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_from_slice<D: WithDType>(&self, data: &[D]) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(data.to_cpu_storage())), Device::Cuda(device) => { let storage = device.storage_from_slice(data)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.storage_from_slice(data)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage<A: NdArray>(&self, array: A) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(array.to_cpu_storage())), Device::Cuda(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_owned<S: WithDType>(&self, data: Vec<S>) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(S::to_cpu_storage_owned(data))), Device::Cuda(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage_owned(storage)?; Ok(Storage::Metal(storage)) } } } pub fn synchronize(&self) -> Result<()> { match self { Self::Cpu => Ok(()), Self::Cuda(d) => d.synchronize(), Self::Metal(d) => d.synchronize(), } } }
candle/candle-core/src/device.rs/0
{ "file_path": "candle/candle-core/src/device.rs", "repo_id": "candle", "token_count": 6082 }
21
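From the user side, the usual entry points to this API are `Device::cuda_if_available` and the dtype helper. A brief sketch:

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Falls back to Device::Cpu when candle was built without CUDA
    // support or no GPU is visible.
    let device = Device::cuda_if_available(0)?;
    println!("location: {:?}, cuda: {}", device.location(), device.is_cuda());

    // BF16 on devices that support it, F32 otherwise.
    let dtype = device.bf16_default_to_f32();
    let t = Tensor::zeros((2, 2), dtype, &device)?;
    println!("{t}");
    Ok(())
}
```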
use super::{GgmlDType, QStorage}; use crate::quantized::k_quants::GgmlType; use crate::{backend::BackendDevice, cuda_backend::WrapErr}; use crate::{CudaDevice, CudaStorage, Result}; use half::f16; use cudarc::driver::{CudaSlice, CudaView, DeviceSlice}; #[derive(Clone, Debug)] pub struct QCudaStorage { data: CudaSlice<u8>, dtype: GgmlDType, device: CudaDevice, } static FORCE_DMMV: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false); pub fn set_force_dmmv(f: bool) { FORCE_DMMV.store(f, std::sync::atomic::Ordering::Relaxed) } pub const WARP_SIZE: usize = 32; pub const MMQ_X_Q4_0_AMPERE: usize = 4; pub const MMQ_Y_Q4_0_AMPERE: usize = 32; pub const NWARPS_Q4_0_AMPERE: usize = 4; pub const GGML_CUDA_MMV_X: usize = 32; pub const GGML_CUDA_MMV_Y: usize = 1; pub const CUDA_QUANTIZE_BLOCK_SIZE: usize = 256; pub const CUDA_DEQUANTIZE_BLOCK_SIZE: usize = 256; pub const MATRIX_ROW_PADDING: usize = 512; fn ceil_div(p: usize, q: usize) -> usize { (p + q - 1) / q } fn pad(p: usize, q: usize) -> usize { ceil_div(p, q) * q } fn quantize_q8_1( src: &CudaView<f32>, dst: &mut CudaSlice<u8>, elem_count: usize, ky: usize, dev: &CudaDevice, ) -> Result<()> { use cudarc::driver::LaunchAsync; let kx = elem_count; let kx_padded = pad(kx, MATRIX_ROW_PADDING); let num_blocks = ceil_div(kx_padded, CUDA_QUANTIZE_BLOCK_SIZE); let func = dev.get_or_load_func("quantize_q8_1", candle_kernels::QUANTIZED)?; let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, ky as u32, 1), block_dim: (CUDA_QUANTIZE_BLOCK_SIZE as u32, 1, 1), shared_mem_bytes: 0, }; let params = (src, dst, kx as i32, kx_padded as i32); unsafe { func.launch(cfg, params) }.w()?; Ok(()) } fn dequantize_f32( data: &CudaSlice<u8>, dtype: GgmlDType, elem_count: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let nb = (elem_count + 255) / 256; let (kernel_name, is_k, block_dim, num_blocks) = match dtype { GgmlDType::Q4_0 => ("dequantize_block_q4_0_f32", false, 32, nb), GgmlDType::Q4_1 => ("dequantize_block_q4_1_f32", false, 32, nb), GgmlDType::Q5_0 => ( "dequantize_block_q5_0_f32", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q5_1 => ( "dequantize_block_q5_1_f32", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q8_0 => ("dequantize_block_q8_0_f32", false, 32, nb), GgmlDType::Q2K => ("dequantize_block_q2_K_f32", true, 64, nb), GgmlDType::Q3K => ("dequantize_block_q3_K_f32", true, 64, nb), GgmlDType::Q4K => ("dequantize_block_q4_K_f32", true, 32, nb), GgmlDType::Q5K => ("dequantize_block_q5_K_f32", true, 64, nb), GgmlDType::Q6K => ("dequantize_block_q6_K_f32", true, 64, nb), GgmlDType::Q8K => ("dequantize_block_q8_K_f32", true, 32, nb), _ => crate::bail!("unsupported dtype for dequantize {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(elem_count).w()? }; // See e.g. 
// https://github.com/ggerganov/llama.cpp/blob/cbbd1efa06f8c09f9dff58ff9d9af509cc4c152b/ggml-cuda.cu#L7270 let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; if is_k { let params = (data, &dst); unsafe { func.launch(cfg, params) }.w()?; } else { let nb32 = match dtype { GgmlDType::Q5_0 | GgmlDType::Q5_1 => elem_count, _ => elem_count / 32, }; let params = (data, &dst, nb32 as i32); unsafe { func.launch(cfg, params) }.w()?; } Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn dequantize_f16( data: &CudaSlice<u8>, dtype: GgmlDType, elem_count: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let nb = (elem_count + 255) / 256; let (kernel_name, is_k, block_dim, num_blocks) = match dtype { GgmlDType::Q4_0 => ("dequantize_block_q4_0_f16", false, 32, nb), GgmlDType::Q4_1 => ("dequantize_block_q4_1_f16", false, 32, nb), GgmlDType::Q5_0 => ( "dequantize_block_q5_0_f16", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q5_1 => ( "dequantize_block_q5_1_f16", false, CUDA_DEQUANTIZE_BLOCK_SIZE, ceil_div(elem_count, 2 * CUDA_DEQUANTIZE_BLOCK_SIZE), ), GgmlDType::Q8_0 => ("dequantize_block_q8_0_f16", false, 32, nb), GgmlDType::Q2K => ("dequantize_block_q2_K_f16", true, 64, nb), GgmlDType::Q3K => ("dequantize_block_q3_K_f16", true, 64, nb), GgmlDType::Q4K => ("dequantize_block_q4_K_f16", true, 32, nb), GgmlDType::Q5K => ("dequantize_block_q5_K_f16", true, 64, nb), GgmlDType::Q6K => ("dequantize_block_q6_K_f16", true, 64, nb), GgmlDType::Q8K => ("dequantize_block_q8_K_f16", true, 32, nb), _ => crate::bail!("unsupported dtype for dequantize {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f16>(elem_count).w()? }; // See e.g. 
// https://github.com/ggerganov/llama.cpp/blob/cbbd1efa06f8c09f9dff58ff9d9af509cc4c152b/ggml-cuda.cu#L7270 let cfg = cudarc::driver::LaunchConfig { grid_dim: (num_blocks as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; if is_k { let params = (data, &dst); unsafe { func.launch(cfg, params) }.w()?; } else { let nb32 = match dtype { GgmlDType::Q5_0 | GgmlDType::Q5_1 => elem_count, _ => elem_count / 32, }; let params = (data, &dst, nb32 as i32); unsafe { func.launch(cfg, params) }.w()?; } Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn dequantize_mul_mat_vec( data: &CudaSlice<u8>, y: &CudaView<f32>, dtype: GgmlDType, ncols: usize, nrows: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len() / dtype.type_size() * dtype.block_size(); if data_elems < ncols * nrows { crate::bail!("unexpected data size {}, ncols {ncols} {nrows}", data_elems) } if y.len() != ncols { crate::bail!("unexpected y size {}, ncols {ncols} {nrows}", y.len()) } let kernel_name = match dtype { GgmlDType::Q4_0 => "dequantize_mul_mat_vec_q4_0_cuda", GgmlDType::Q4_1 => "dequantize_mul_mat_vec_q4_1_cuda", GgmlDType::Q5_0 => "dequantize_mul_mat_vec_q5_0_cuda", GgmlDType::Q5_1 => "dequantize_mul_mat_vec_q5_1_cuda", GgmlDType::Q8_0 => "dequantize_mul_mat_vec_q8_0_cuda", GgmlDType::Q2K => "dequantize_mul_mat_vec_q2_k", GgmlDType::Q3K => "dequantize_mul_mat_vec_q3_k", GgmlDType::Q4K => "dequantize_mul_mat_vec_q4_k", GgmlDType::Q5K => "dequantize_mul_mat_vec_q5_k", GgmlDType::Q6K => "dequantize_mul_mat_vec_q6_k", _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(nrows).w()? }; let block_num_y = ceil_div(nrows, GGML_CUDA_MMV_Y); let cfg = cudarc::driver::LaunchConfig { grid_dim: (block_num_y as u32, 1, 1), block_dim: (WARP_SIZE as u32, GGML_CUDA_MMV_Y as u32, 1), shared_mem_bytes: 0, }; let params = (data, y, &dst, ncols as i32, nrows as i32); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } fn mul_mat_vec_via_q8_1( data: &CudaSlice<u8>, y: &CudaView<f32>, dtype: GgmlDType, ncols: usize, nrows: usize, b_size: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len() / dtype.type_size() * dtype.block_size(); if data_elems < ncols * nrows { crate::bail!("unexpected data size {}, ncols {ncols} {nrows}", data_elems) } if y.len() != ncols * b_size { crate::bail!("unexpected y size {}, ncols {ncols} {nrows}", y.len()) } if b_size == 0 || b_size > 8 { crate::bail!("only bsize between 1 and 8 are supported, got {b_size}") } // Start by quantizing y let ncols_padded = pad(ncols, MATRIX_ROW_PADDING); let y_size_in_bytes = b_size * ncols_padded * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? 
}; quantize_q8_1(y, &mut y_q8_1, ncols, b_size, dev)?; let kernel_name = match dtype { GgmlDType::Q4_0 => "mul_mat_vec_q4_0_q8_1_cuda", GgmlDType::Q4_1 => "mul_mat_vec_q4_1_q8_1_cuda", GgmlDType::Q5_0 => "mul_mat_vec_q5_0_q8_1_cuda", GgmlDType::Q5_1 => "mul_mat_vec_q5_1_q8_1_cuda", GgmlDType::Q8_0 => "mul_mat_vec_q8_0_q8_1_cuda", GgmlDType::Q2K => "mul_mat_vec_q2_K_q8_1_cuda", GgmlDType::Q3K => "mul_mat_vec_q3_K_q8_1_cuda", GgmlDType::Q4K => "mul_mat_vec_q4_K_q8_1_cuda", GgmlDType::Q5K => "mul_mat_vec_q5_K_q8_1_cuda", GgmlDType::Q6K => "mul_mat_vec_q6_K_q8_1_cuda", _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let kernel_name = format!("{kernel_name}{b_size}"); let func = dev.get_or_load_func(&kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(nrows * b_size).w()? }; // https://github.com/ggerganov/llama.cpp/blob/facb8b56f8fd3bb10a693bf0943ae9d69d0828ef/ggml-cuda/mmvq.cu#L98 let (nblocks, nwarps) = match b_size { 1 => (nrows as u32, 4), 2..=4 => ((nrows as u32 + 1) / 2, 4), 5..=8 => ((nrows as u32 + 1) / 2, 2), _ => crate::bail!("unexpected bsize {b_size}"), }; let cfg = cudarc::driver::LaunchConfig { grid_dim: (nblocks, 1, 1), block_dim: (WARP_SIZE as u32, nwarps, 1), shared_mem_bytes: 0, }; let params = ( data, &y_q8_1, &dst, /* ncols_x */ ncols as i32, /* nrows_x */ nrows as i32, /* nrows_y */ ncols_padded as i32, /* nrows_dst */ nrows as i32, ); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } #[allow(clippy::too_many_arguments)] fn mul_mat_via_q8_1( data: &CudaSlice<u8>, y: &CudaView<f32>, dtype: GgmlDType, x_rows: usize, x_cols: usize, y_rows: usize, y_cols: usize, dev: &CudaDevice, ) -> Result<CudaStorage> { use cudarc::driver::LaunchAsync; let data_elems = data.len() / dtype.type_size() * dtype.block_size(); if data_elems < x_rows * x_cols { crate::bail!("unexpected lhs size {}, {x_rows} {x_cols}", data_elems) } if y.len() != y_rows * y_cols { crate::bail!("unexpected y size {}, {y_rows} {y_cols}", y.len()) } if x_cols != y_rows { crate::bail!("unexpected x/y size {x_rows} {x_cols} {y_rows} {y_cols}") } let k = x_cols; // Start by quantizing y let k_padded = pad(k, MATRIX_ROW_PADDING); let y_size_in_bytes = k_padded * y_rows * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? }; quantize_q8_1(y, &mut y_q8_1, k, y_cols, dev)?; let (kernel_name, mmq_x, mmq_y) = match dtype { GgmlDType::Q4_0 => ("mul_mat_q4_0", 64, 128), GgmlDType::Q4_1 => ("mul_mat_q4_1", 64, 128), GgmlDType::Q5_0 => ("mul_mat_q5_0", 128, 64), GgmlDType::Q5_1 => ("mul_mat_q5_1", 128, 64), GgmlDType::Q8_0 => ("mul_mat_q8_0", 128, 64), GgmlDType::Q2K => ("mul_mat_q2_K", 64, 128), GgmlDType::Q3K => ("mul_mat_q3_K", 128, 128), GgmlDType::Q4K => ("mul_mat_q4_K", 64, 128), GgmlDType::Q5K => ("mul_mat_q5_K", 64, 128), GgmlDType::Q6K => ("mul_mat_q6_K", 64, 64), _ => crate::bail!("unsupported dtype for quantized matmul {dtype:?}"), }; let func = dev.get_or_load_func(kernel_name, candle_kernels::QUANTIZED)?; let dst = unsafe { dev.alloc::<f32>(x_rows * y_cols).w()? 
}; let cfg = cudarc::driver::LaunchConfig { grid_dim: ( ceil_div(x_rows, mmq_y) as u32, ceil_div(y_cols, mmq_x) as u32, 1, ), block_dim: (WARP_SIZE as u32, 4, 1), shared_mem_bytes: 0, }; let params = ( /* vx */ data, /* vy */ &y_q8_1, /* dst */ &dst, /* ncols_x */ x_cols as i32, /* nrows_x */ x_rows as i32, /* ncols_y */ y_cols as i32, /* nrows_y */ k_padded as i32, /* nrows_dst */ x_rows as i32, ); unsafe { func.launch(cfg, params) }.w()?; Ok(CudaStorage::wrap_cuda_slice(dst, dev.clone())) } impl QCudaStorage { pub fn zeros(device: &CudaDevice, el_count: usize, dtype: GgmlDType) -> Result<Self> { let size_in_bytes = ceil_div(el_count, dtype.block_size()) * dtype.type_size(); let data = device.alloc_zeros::<u8>(size_in_bytes).w()?; Ok(QCudaStorage { data, device: device.clone(), dtype, }) } pub fn dtype(&self) -> GgmlDType { self.dtype } pub fn device(&self) -> &CudaDevice { &self.device } pub fn dequantize(&self, elem_count: usize) -> Result<CudaStorage> { fn deq<T: GgmlType>(buffer: &[u8], n: usize, dst: &mut [f32]) -> Result<()> { let slice = unsafe { std::slice::from_raw_parts(buffer.as_ptr() as *const T, n) }; let vec = slice.to_vec(); T::to_float(&vec, dst) } let fast_kernel = matches!( self.dtype, GgmlDType::Q4_0 | GgmlDType::Q4_1 | GgmlDType::Q5_0 | GgmlDType::Q5_1 | GgmlDType::Q8_0 | GgmlDType::Q2K | GgmlDType::Q3K | GgmlDType::Q4K | GgmlDType::Q5K | GgmlDType::Q6K | GgmlDType::Q8K ); if fast_kernel { return dequantize_f32(&self.data, self.dtype, elem_count, self.device()); } // Run the dequantization on cpu. let buffer = self.device.dtoh_sync_copy(&self.data).w()?; let mut out = vec![0.0; elem_count]; let block_len = elem_count / self.dtype.block_size(); match self.dtype { GgmlDType::F32 => deq::<f32>(&buffer, block_len, &mut out)?, GgmlDType::F16 => deq::<half::f16>(&buffer, block_len, &mut out)?, GgmlDType::Q4_0 => deq::<crate::quantized::BlockQ4_0>(&buffer, block_len, &mut out)?, GgmlDType::Q4_1 => deq::<crate::quantized::BlockQ4_1>(&buffer, block_len, &mut out)?, GgmlDType::Q5_0 => deq::<crate::quantized::BlockQ5_0>(&buffer, block_len, &mut out)?, GgmlDType::Q5_1 => deq::<crate::quantized::BlockQ5_1>(&buffer, block_len, &mut out)?, GgmlDType::Q8_0 => deq::<crate::quantized::BlockQ8_0>(&buffer, block_len, &mut out)?, GgmlDType::Q8_1 => deq::<crate::quantized::BlockQ8_1>(&buffer, block_len, &mut out)?, GgmlDType::Q2K => deq::<crate::quantized::BlockQ2K>(&buffer, block_len, &mut out)?, GgmlDType::Q3K => deq::<crate::quantized::BlockQ3K>(&buffer, block_len, &mut out)?, GgmlDType::Q4K => deq::<crate::quantized::BlockQ4K>(&buffer, block_len, &mut out)?, GgmlDType::Q5K => deq::<crate::quantized::BlockQ5K>(&buffer, block_len, &mut out)?, GgmlDType::Q6K => deq::<crate::quantized::BlockQ6K>(&buffer, block_len, &mut out)?, GgmlDType::Q8K => deq::<crate::quantized::BlockQ8K>(&buffer, block_len, &mut out)?, } self.device .storage_from_cpu_storage(&crate::CpuStorage::F32(out)) } pub fn dequantize_f16(&self, elem_count: usize) -> Result<CudaStorage> { dequantize_f16(&self.data, self.dtype, elem_count, self.device()) } pub fn quantize(&mut self, src: &CudaStorage) -> Result<()> { // Run the quantization on cpu. let src = match &src.slice { crate::cuda_backend::CudaStorageSlice::F32(data) => { self.device.dtoh_sync_copy(data).w()? 
} _ => crate::bail!("only f32 can be quantized"), }; let src_len = src.len(); let src = crate::Storage::Cpu(crate::CpuStorage::F32(src)); let mut qcpu_storage = crate::Device::Cpu.qzeros(src_len, self.dtype)?; qcpu_storage.quantize(&src)?; let data = qcpu_storage.data()?; let data = self.device.htod_sync_copy(data.as_ref()).w()?; self.data = data; Ok(()) } pub fn storage_size_in_bytes(&self) -> usize { self.data.len() } pub fn fwd( &self, self_shape: &crate::Shape, storage: &CudaStorage, layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { let max_bm = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { 1 } else { 8 }; let use_vec_kernel = match layout.shape().dims() { [b, m, _k] => b * m <= max_bm, [b, _k] => *b <= max_bm, _ => false, }; if use_vec_kernel { self.dequantize_matmul_vec(self_shape, storage, layout) } else { self.dequantize_matmul(self_shape, storage, layout) } } } impl QCudaStorage { fn dequantize_matmul_vec( &self, self_shape: &crate::Shape, rhs: &CudaStorage, rhs_l: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { let (nrows, ncols) = self_shape.dims2()?; let rhs = rhs.as_cuda_slice::<f32>()?; let rhs = match rhs_l.contiguous_offsets() { Some((o1, o2)) => rhs.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "dmmv" }.bt())?, }; let (b_size, k) = match rhs_l.shape().dims() { [b, m, k] => (b * m, *k), [b, k] => (*b, *k), _ => crate::bail!("unexpected rhs shape in dmmv {:?}", rhs_l.shape()), }; if ncols != k { crate::bail!("mismatch on matmul dim {self_shape:?} {:?}", rhs_l.shape()) } let out = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { dequantize_mul_mat_vec(&self.data, &rhs, self.dtype, ncols, nrows, self.device())? } else { mul_mat_vec_via_q8_1( &self.data, &rhs, self.dtype, ncols, nrows, b_size, self.device(), )? }; let mut out_shape = rhs_l.shape().dims().to_vec(); out_shape.pop(); out_shape.push(nrows); Ok((out, out_shape.into())) } fn dequantize_matmul( &self, self_shape: &crate::Shape, storage: &CudaStorage, layout: &crate::Layout, ) -> Result<(CudaStorage, crate::Shape)> { use crate::backend::BackendStorage; let (n, k) = self_shape.dims2()?; let (b, m, k2) = match layout.shape().dims() { &[b, m, k2] => (b, m, k2), &[m, k2] => (1, m, k2), s => crate::bail!("unexpected shape for input {s:?}"), }; if k2 != k { crate::bail!("mismatch on matmul dim {self_shape:?} {:?}", layout.shape()) } let out = if FORCE_DMMV.load(std::sync::atomic::Ordering::Relaxed) { let data_f32 = self.dequantize(n * k)?; let rhs_l = crate::Layout::new((k, n).into(), vec![1, k], 0).broadcast_as((b, k, n))?; storage.matmul(&data_f32, (b, m, n, k), layout, &rhs_l)? } else { let storage = storage.as_cuda_slice::<f32>()?; let storage = match layout.contiguous_offsets() { Some((o1, o2)) => storage.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "quantized-matmul", } .bt())?, }; mul_mat_via_q8_1( &self.data, &storage, self.dtype, /* x_rows */ n, /* x_cols */ k, /* y_rows */ k, /* y_cols */ b * m, self.device(), )? 
}; let mut out_shape = layout.shape().dims().to_vec(); out_shape.pop(); out_shape.push(n); Ok((out, out_shape.into())) } } pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>( device: &CudaDevice, data: &[T], ) -> Result<super::QStorage> { let data = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const u8, core::mem::size_of_val(data)) }; let data = device.htod_sync_copy(data).w()?; Ok(QStorage::Cuda(QCudaStorage { data, device: device.clone(), dtype: T::DTYPE, })) } #[cfg(test)] mod test { use super::*; #[test] fn cuda_quantize_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let el = 256; let el_padded = pad(el, MATRIX_ROW_PADDING); let y_size_in_bytes = el_padded * GgmlDType::Q8_1.type_size() / GgmlDType::Q8_1.block_size(); let mut y_q8_1 = unsafe { dev.alloc::<u8>(y_size_in_bytes).w()? }; let vs: Vec<f32> = (0..el).map(|v| v as f32).collect(); let y = dev.htod_sync_copy(&vs).w()?; quantize_q8_1(&y.slice(..), &mut y_q8_1, el, 1, &dev)?; Ok(()) } #[test] fn cuda_mmv_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let ncols = 256; let vs: Vec<f32> = (0..ncols).map(|v| v as f32).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_vec_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* ncols */ ncols, /* nrows */ 1, /* b_size */ 1, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); assert_eq!(vs.len(), 1); // for n = 255, n.(n+1).(2n+1) / 6 = 5559680 // Q8 means 1/256 precision. assert_eq!(vs[0], 5561664.5); let cuda_storage = dequantize_mul_mat_vec( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* ncols */ ncols, /* nrows */ 1, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); assert_eq!(vs.len(), 1); assert_eq!(vs[0], 5561851.0); Ok(()) } #[test] fn cuda_mm_q8_1() -> Result<()> { let dev = CudaDevice::new(0)?; let ncols = 256; let vs: Vec<f32> = (0..ncols * 4).map(|v| v as f32 / 4.).collect(); let y = dev.htod_sync_copy(&vs).w()?; let mut xs = QCudaStorage::zeros(&dev, ncols * 4, GgmlDType::Q4_0)?; xs.quantize(&CudaStorage::wrap_cuda_slice(y.clone(), dev.clone()))?; let cuda_storage = mul_mat_via_q8_1( &xs.data, &y.slice(..), /* dtype */ GgmlDType::Q4_0, /* x_rows */ 4, /* x_cols */ ncols, /* y_rows */ ncols, /* y_cols */ 4, &dev, )?; let vs = cuda_storage.as_cuda_slice::<f32>()?; let vs = dev.dtoh_sync_copy(&vs.slice(..)).unwrap(); /* x = torch.tensor([float(v) for v in range(1024)]).reshape(4, 256) x @ x.t() / 16 tensor([[ 347480.0000, 869720.0000, 1391960.0000, 1914200.0000], [ 869720.0000, 2440536.0000, 4011352.0000, 5582166.5000], [ 1391960.0000, 4011352.0000, 6630742.0000, 9250132.0000], [ 1914200.0000, 5582166.5000, 9250132.0000, 12918099.0000]]) */ assert_eq!(vs.len(), 16); assert_eq!(vs[0], 347604.0); assert_eq!(vs[1], 888153.06); assert_eq!(vs[4], 869780.7); assert_eq!(vs[5], 2483145.0); assert_eq!(vs[11], 9407368.0); assert_eq!(vs[14], 9470856.0); assert_eq!(vs[15], 13138824.0); Ok(()) } }
candle/candle-core/src/quantized/cuda.rs/0
{ "file_path": "candle/candle-core/src/quantized/cuda.rs", "repo_id": "candle", "token_count": 13702 }
22
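User code reaches this storage through `QTensor` and `QMatMul` from the same `quantized` module: quantize an f32 weight, wrap it, and `forward` dispatches to the vector or matrix path above depending on the batch size. A sketch, assuming a weight whose last dimension is a multiple of the `Q4_0` block size (32):

```rust
use candle_core::quantized::{GgmlDType, QMatMul, QTensor};
use candle_core::{Device, Module, Tensor};

fn main() -> candle_core::Result<()> {
    let device = Device::cuda_if_available(0)?;

    // (n, k) = (8, 256) weight; k must be a multiple of the block size.
    let weight = Tensor::randn(0f32, 1.0, (8, 256), &device)?;
    let qweight = QTensor::quantize(&weight, GgmlDType::Q4_0)?;
    let qmatmul = QMatMul::from_qtensor(qweight)?;

    // forward computes xs @ weight^T, so (1, 256) -> (1, 8); on CUDA a
    // batch size <= 8 takes the mul_mat_vec path shown above.
    let xs = Tensor::randn(0f32, 1.0, (1, 256), &device)?;
    let ys = qmatmul.forward(&xs)?;
    assert_eq!(ys.dims(), &[1, 8]);
    Ok(())
}
```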
use crate::{Result, Shape, Tensor}; pub trait Dim: crate::shape::Dim + Copy {} impl<T: crate::shape::Dim + Copy> Dim for T {} /// A stream tensor is used in streaming module. It can either contain an actual tensor or be /// empty. #[derive(Clone)] pub struct StreamTensor(Option<Tensor>); impl std::fmt::Debug for StreamTensor { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.0 { Some(t) => write!(f, "{:?}", t.shape()), None => write!(f, "Empty"), } } } impl std::convert::From<Option<Tensor>> for StreamTensor { fn from(value: Option<Tensor>) -> Self { Self(value) } } impl std::convert::From<Tensor> for StreamTensor { fn from(value: Tensor) -> Self { Self(Some(value)) } } impl std::convert::From<()> for StreamTensor { fn from(_value: ()) -> Self { Self(None) } } impl StreamTensor { pub fn empty() -> Self { Self(None) } pub fn from_tensor(tensor: Tensor) -> Self { Self(Some(tensor)) } pub fn shape(&self) -> Option<&Shape> { self.0.as_ref().map(|t| t.shape()) } pub fn cat2<D: Dim>(&self, rhs: &Self, dim: D) -> Result<Self> { let xs = match (&self.0, &rhs.0) { (Some(lhs), Some(rhs)) => { let xs = Tensor::cat(&[lhs, rhs], dim)?; Some(xs) } (Some(xs), None) | (None, Some(xs)) => Some(xs.clone()), (None, None) => None, }; Ok(Self(xs)) } pub fn seq_len<D: Dim>(&self, dim: D) -> Result<usize> { match &self.0 { None => Ok(0), Some(v) => v.dim(dim), } } pub fn reset(&mut self) { self.0 = None } pub fn narrow<D: Dim>(&self, dim: D, offset: usize, len: usize) -> Result<StreamTensor> { let t = match &self.0 { None => None, Some(t) => { let seq_len = t.dim(dim)?; if seq_len <= offset { None } else { let t = t.narrow(dim, offset, usize::min(len, seq_len - offset))?; Some(t) } } }; Ok(Self(t)) } /// Splits the Streaming Tensor on the time axis `dim` with the first `lhs_len` elements /// returned in the first output and the remaining in the second output. pub fn split<D: Dim>(&self, dim: D, lhs_len: usize) -> Result<(Self, Self)> { match &self.0 { None => Ok((Self::empty(), Self::empty())), Some(t) => { let seq_len = t.dim(dim)?; let lhs_len = usize::min(seq_len, lhs_len); if lhs_len == 0 { Ok((Self::empty(), t.clone().into())) } else { let lhs = Self::from_tensor(t.narrow(dim, 0, lhs_len)?); let rhs_len = seq_len - lhs_len; let rhs = if rhs_len == 0 { Self::empty() } else { Self::from_tensor(t.narrow(dim, lhs_len, rhs_len)?) }; Ok((lhs, rhs)) } } } } pub fn as_option(&self) -> Option<&Tensor> { self.0.as_ref() } pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> { match &self.0 { None => Ok(Self::empty()), Some(t) => Ok(Self::from_tensor(t.apply(m)?)), } } } /// Streaming modules take as input a stream tensor and return a stream tensor. They may perform /// some internal buffering so that enough data has been received for the module to be able to /// perform some operations. pub trait StreamingModule { // TODO: Should we also have a flush method? 
fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor>; fn reset_state(&mut self); } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum BinOp { Add, Mul, Sub, Div, } #[derive(Debug, Clone)] pub struct StreamingBinOp { prev_lhs: StreamTensor, prev_rhs: StreamTensor, pub op: BinOp, pub dim: crate::D, } impl StreamingBinOp { pub fn new(op: BinOp, dim: crate::D) -> Self { Self { prev_lhs: StreamTensor::empty(), prev_rhs: StreamTensor::empty(), op, dim, } } pub fn reset_state(&mut self) { self.prev_lhs.reset(); self.prev_rhs.reset(); } pub fn forward(&self, lhs: &Tensor, rhs: &Tensor) -> Result<Tensor> { match self.op { BinOp::Add => Tensor::add(lhs, rhs), BinOp::Mul => Tensor::mul(lhs, rhs), BinOp::Sub => Tensor::sub(lhs, rhs), BinOp::Div => Tensor::div(lhs, rhs), } } pub fn step(&mut self, lhs: &StreamTensor, rhs: &StreamTensor) -> Result<StreamTensor> { let lhs = StreamTensor::cat2(&self.prev_lhs, lhs, self.dim)?; let rhs = StreamTensor::cat2(&self.prev_rhs, rhs, self.dim)?; let lhs_len = lhs.seq_len(self.dim)?; let rhs_len = rhs.seq_len(self.dim)?; let common_len = usize::min(lhs_len, rhs_len); let (lhs, prev_lhs) = lhs.split(self.dim, common_len)?; let (rhs, prev_rhs) = rhs.split(self.dim, common_len)?; let ys = match (lhs.0, rhs.0) { (Some(lhs), Some(rhs)) => { let ys = self.forward(&lhs, &rhs)?; StreamTensor::from_tensor(ys) } (None, None) => StreamTensor::empty(), (lhs, rhs) => crate::bail!("INTERNAL ERROR inconsistent lhs and rhs {lhs:?} {rhs:?}"), }; self.prev_lhs = prev_lhs; self.prev_rhs = prev_rhs; Ok(ys) } } /// Simple wrapper that doesn't do any buffering. pub struct Map<T: crate::Module>(T); impl<T: crate::Module> StreamingModule for Map<T> { fn reset_state(&mut self) {} fn step(&mut self, xs: &StreamTensor) -> Result<StreamTensor> { xs.apply(&self.0) } }
candle/candle-core/src/streaming.rs/0
{ "file_path": "candle/candle-core/src/streaming.rs", "repo_id": "candle", "token_count": 3115 }
23
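The buffering contract of `StreamingBinOp` is easiest to see by feeding the two sides in mismatched chunk sizes: only the overlapping prefix is emitted, and the remainder is kept for the next `step`. A sketch, assuming the `streaming` module is publicly reachable under this path (re-exports may differ across candle versions):

```rust
use candle_core::streaming::{BinOp, StreamTensor, StreamingBinOp};
use candle_core::{D, Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let mut add = StreamingBinOp::new(BinOp::Add, D::Minus1);

    // lhs delivers 4 elements but rhs only 2: step emits the 2-element
    // overlap and buffers the remaining 2 lhs elements internally.
    let lhs = StreamTensor::from_tensor(Tensor::new(&[1f32, 2., 3., 4.], &dev)?);
    let rhs = StreamTensor::from_tensor(Tensor::new(&[10f32, 20.], &dev)?);
    let out = add.step(&lhs, &rhs)?;
    println!("{out:?}"); // [2]

    // The next rhs chunk lines up against the buffered lhs tail.
    let rhs = StreamTensor::from_tensor(Tensor::new(&[30f32, 40.], &dev)?);
    let out = add.step(&StreamTensor::empty(), &rhs)?;
    println!("{out:?}"); // [2]
    Ok(())
}
```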
use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor}; // https://github.com/huggingface/candle/issues/364 fn avg_pool2d(dev: &Device) -> Result<()> { let data: Vec<f32> = vec![ 1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[0.5f32, 1.], [1., 1.]]); let data: Vec<f32> = vec![ 1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 2, 8), dev)?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[5. / 4., 6. / 4., 6. / 4., 1.]]); Ok(()) } fn max_pool2d(dev: &Device) -> Result<()> { let data: Vec<f32> = vec![ 1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1., ]; let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?; let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[2f32, 3.], [5., 1.]]); let t = t.reshape((1, 1, 2, 8))?; let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!(pool.to_vec2::<f32>()?, [[2.0, 3.0, 5.0, 1.0]]); Ok(()) } /* This test corresponds to the following PyTorch script. import torch torch.manual_seed(4242) t = torch.randn((1, 2, 4, 4)) print(t.flatten()) res = torch.nn.functional.avg_pool2d(t, 2) print(res) */ fn avg_pool2d_pytorch(dev: &Device) -> Result<()> { if dev.is_metal() { return Ok(()); } let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, ], dev, )? .reshape((1, 2, 4, 4))?; let pool = t.avg_pool2d(2)?.squeeze(0)?; assert_eq!( test_utils::to_vec3_round(&pool, 4)?, [ [[-1.1926, -0.0395], [0.2688, 0.1871]], [[0.1835, -0.1606], [0.6249, 0.3217]] ] ); let pool = t.avg_pool2d(3)?.squeeze(0)?; assert_eq!( test_utils::to_vec3_round(&pool, 4)?, [[[0.085]], [[0.0078]]] ); let t = t.reshape((1, 1, 4, 8))?; let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?; assert_eq!( test_utils::to_vec2_round(&pool, 4)?, [ [0.7745, 0.0276, -1.6983, 0.12], [0.3542, 0.1625, 0.4542, -0.0014] ] ); Ok(()) } fn upsample_nearest2d(dev: &Device) -> Result<()> { let t = Tensor::arange(0f32, 6f32, dev)?.reshape((1, 1, 2, 3))?; let upsampled = t.upsample_nearest2d(4, 6)?.i(0)?.i(0)?; assert_eq!( t.i(0)?.i(0)?.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]] ); assert_eq!( upsampled.to_vec2::<f32>()?, [ [0.0, 0.0, 1.0, 1.0, 2.0, 2.0], [0.0, 0.0, 1.0, 1.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 5.0, 5.0], [3.0, 3.0, 4.0, 4.0, 5.0, 5.0] ] ); Ok(()) } test_device!(avg_pool2d, avg_pool2d_cpu, avg_pool2d_gpu, avg_pool2d_metal); test_device!( avg_pool2d_pytorch, avg_pool2d_pytorch_cpu, avg_pool2d_pytorch_gpu, avg_pool2d_pytorch_metal ); test_device!(max_pool2d, max_pool2d_cpu, max_pool2d_gpu, max_pool2d_metal); test_device!( upsample_nearest2d, upsample_nearest2d_cpu, upsample_nearest2d_gpu, upsample_nearest2d_metal );
candle/candle-core/tests/pool_tests.rs/0
{ "file_path": "candle/candle-core/tests/pool_tests.rs", "repo_id": "candle", "token_count": 2112 }
24
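Outside the test macros the same ops read naturally: `avg_pool2d` / `max_pool2d` take a square kernel size (the stride defaults to it) and `upsample_nearest2d` takes an explicit target height and width. A short sketch:

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    let dev = Device::Cpu;
    let t = Tensor::arange(0f32, 16f32, &dev)?.reshape((1, 1, 4, 4))?;

    // Kernel 2, stride 2: (1, 1, 4, 4) pools down to (1, 1, 2, 2).
    assert_eq!(t.avg_pool2d(2)?.dims(), &[1, 1, 2, 2]);
    assert_eq!(t.max_pool2d(2)?.dims(), &[1, 1, 2, 2]);

    // Nearest-neighbour upsampling to an explicit (height, width).
    assert_eq!(t.upsample_nearest2d(8, 8)?.dims(), &[1, 1, 8, 8]);
    Ok(())
}
```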
//! Helper functions for the tinystories dataset. This uses the pre-tokenized version as generated //! by the tools from https://github.com/karpathy/llama2.c use candle::{Device, Result, Tensor}; pub struct Dataset { valid_tokens: Vec<memmap2::Mmap>, train_tokens: Vec<memmap2::Mmap>, } fn mmap_file(p: &std::path::PathBuf) -> Result<memmap2::Mmap> { let file = std::fs::File::open(p)?; let mmap = unsafe { memmap2::MmapOptions::new().map(&file)? }; Ok(mmap) } impl Dataset { pub fn new<P: AsRef<std::path::Path>>(dir: P) -> Result<Self> { let dir = dir.as_ref(); let mut bin_files = vec![]; for file in std::fs::read_dir(dir)?.flatten() { let file = file.path(); if let Some(extension) = file.extension() { if extension == "bin" { bin_files.push(file) } } } if bin_files.len() < 2 { candle::bail!("found less than two bin files in {:?}", dir) } bin_files.sort(); let valid_tokens = mmap_file(&bin_files[0])?; let train_tokens = bin_files[1..] .iter() .map(mmap_file) .collect::<Result<Vec<_>>>()?; Ok(Self { valid_tokens: vec![valid_tokens], train_tokens, }) } pub fn train_tokens(&self) -> usize { self.train_tokens.len() } pub fn valid_tokens(&self) -> usize { self.valid_tokens.len() } } pub struct DatasetRandomIter<'a> { all_tokens: &'a [memmap2::Mmap], tokens: Vec<&'a memmap2::Mmap>, current_tokens: &'a memmap2::Mmap, indexes_in_bytes: Vec<usize>, seq_len: usize, device: Device, } impl<'a> DatasetRandomIter<'a> { pub fn new(ds: &'a Dataset, valid: bool, seq_len: usize, device: Device) -> Self { use rand::seq::SliceRandom; use rand::thread_rng; let all_tokens = if valid { &ds.valid_tokens } else { &ds.train_tokens }; let mut tokens = all_tokens.iter().collect::<Vec<_>>(); tokens.shuffle(&mut thread_rng()); let current_tokens = tokens.pop().unwrap(); let seq_len_in_bytes = seq_len * 2; let mut indexes_in_bytes = (0..current_tokens.len() - seq_len_in_bytes) .step_by(seq_len_in_bytes) .collect::<Vec<_>>(); indexes_in_bytes.shuffle(&mut thread_rng()); Self { all_tokens, tokens, current_tokens, indexes_in_bytes, seq_len, device, } } } impl<'a> Iterator for DatasetRandomIter<'a> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { use byteorder::{LittleEndian, ReadBytesExt}; use rand::seq::SliceRandom; use rand::thread_rng; let seq_len = self.seq_len; if self.indexes_in_bytes.is_empty() { if self.tokens.is_empty() { self.tokens = self.all_tokens.iter().collect(); self.tokens.shuffle(&mut thread_rng()); } self.current_tokens = self.tokens.pop().unwrap(); let seq_len_in_bytes = self.seq_len * 2; self.indexes_in_bytes = (0..self.current_tokens.len() - seq_len_in_bytes) .step_by(seq_len_in_bytes) .collect::<Vec<_>>(); self.indexes_in_bytes.shuffle(&mut thread_rng()); } let start_idx = self.indexes_in_bytes.pop().unwrap(); let bytes = &self.current_tokens[start_idx..start_idx + 2 * (seq_len + 1)]; let mut tokens = vec![0u16; bytes.len() / 2]; if let Err(err) = std::io::Cursor::new(bytes).read_u16_into::<LittleEndian>(&mut tokens) { return Some(Err(err.into())); } let tokens = tokens.into_iter().map(|v| v as u32).collect::<Vec<_>>(); let inputs = Tensor::new(&tokens[..seq_len], &self.device); let targets = Tensor::new(&tokens[1..], &self.device); Some(candle::error::zip(inputs, targets)) } }
candle/candle-datasets/src/nlp/tinystories.rs/0
{ "file_path": "candle/candle-datasets/src/nlp/tinystories.rs", "repo_id": "candle", "token_count": 2097 }
25
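The iterator above slices a window of `seq_len + 1` little-endian u16 tokens out of the memory-mapped file and splits it into a shifted (inputs, targets) pair. A small sketch of that decoding step, using only the `byteorder` crate that the iterator itself depends on (the helper name is an illustration, not part of the dataset module):

```rust
use byteorder::{LittleEndian, ReadBytesExt};

// A window of seq_len + 1 little-endian u16 tokens yields
// inputs = tokens[..seq_len] and targets = tokens[1..],
// i.e. targets are the inputs shifted left by one position.
fn window_to_pair(bytes: &[u8], seq_len: usize) -> std::io::Result<(Vec<u32>, Vec<u32>)> {
    assert_eq!(bytes.len(), 2 * (seq_len + 1));
    let mut tokens = vec![0u16; seq_len + 1];
    std::io::Cursor::new(bytes).read_u16_into::<LittleEndian>(&mut tokens)?;
    let tokens: Vec<u32> = tokens.into_iter().map(u32::from).collect();
    Ok((tokens[..seq_len].to_vec(), tokens[1..].to_vec()))
}

fn main() -> std::io::Result<()> {
    // Tokens [5, 6, 7] encoded as little-endian u16.
    let bytes = [5u8, 0, 6, 0, 7, 0];
    let (inputs, targets) = window_to_pair(&bytes, 2)?;
    assert_eq!(inputs, vec![5, 6]);
    assert_eq!(targets, vec![6, 7]);
    Ok(())
}
```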
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::Parser; use candle::{DType, Device, Result, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use tokenizers::Tokenizer; enum Model { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl Model { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor> { match self { Self::M(m) => m.text_decoder().forward(xs, img_xs), Self::Q(m) => m.text_decoder().forward(xs, img_xs), } } } // TODO: Maybe add support for the conditional prompt. #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Use the quantized version of the model. #[arg(long)] quantized: bool, } const SEP_TOKEN_ID: u32 = 102; /// Loads an image from disk using the image crate, this returns a tensor with shape /// (3, 384, 384). OpenAI normalization is applied. pub fn load_image<P: AsRef<std::path::Path>>(p: P) -> Result<Tensor> { let img = image::ImageReader::open(p)? .decode() .map_err(candle::Error::wrap)? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), &Device::Cpu)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], &Device::Cpu)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], &Device::Cpu)? .reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let model_file = match args.model { None => { let api = hf_hub::api::sync::Api::new()?; if args.quantized { let api = api.model("lmz/candle-blip".to_string()); api.get("blip-image-captioning-large-q4k.gguf")? } else { let api = api.repo(hf_hub::Repo::with_revision( "Salesforce/blip-image-captioning-large".to_string(), hf_hub::RepoType::Model, "refs/pr/18".to_string(), )); api.get("model.safetensors")? } } Some(model) => model.into(), }; let tokenizer = match args.tokenizer { None => { let api = hf_hub::api::sync::Api::new()?; let api = api.model("Salesforce/blip-image-captioning-large".to_string()); api.get("tokenizer.json")? } Some(file) => file.into(), }; let tokenizer = Tokenizer::from_file(tokenizer).map_err(E::msg)?; let mut tokenizer = TokenOutputStream::new(tokenizer); let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let config = blip::Config::image_captioning_large(); let device = candle_examples::device(args.cpu)?; let (image_embeds, device, mut model) = if args.quantized { let device = Device::Cpu; let image = load_image(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let vb = quantized_blip::VarBuilder::from_gguf(model_file, &device)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; let image_embeds = image.unsqueeze(0)?.apply(model.vision_model())?; (image_embeds, device, Model::Q(model)) } else { let image = load_image(args.image)?.to_device(&device)?; println!("loaded image {image:?}"); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? 
}; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; let image_embeds = image.unsqueeze(0)?.apply(model.vision_model())?; (image_embeds, device, Model::M(model)) }; let mut token_ids = vec![30522u32]; for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = tokenizer.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } } if let Some(rest) = tokenizer.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
candle/candle-examples/examples/blip/main.rs/0
{ "file_path": "candle/candle-examples/examples/blip/main.rs", "repo_id": "candle", "token_count": 2436 }
26
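`load_image` in the BLIP example normalizes pixels channel-wise with the OpenAI CLIP mean/std after scaling to [0, 1]. A scalar sketch of that arithmetic (the constants are copied from the example; the helper function is hypothetical):

```rust
// Per-channel normalization applied in `load_image`: pixel values in
// [0, 255] are scaled to [0, 1], then shifted and scaled by the OpenAI
// CLIP mean/std used by BLIP's vision tower.
const MEAN: [f32; 3] = [0.48145466, 0.4578275, 0.40821073];
const STD: [f32; 3] = [0.26862954, 0.261_302_6, 0.275_777_1];

fn normalize_pixel(value: u8, channel: usize) -> f32 {
    (value as f32 / 255.0 - MEAN[channel]) / STD[channel]
}

fn main() {
    // A red-channel pixel near the channel mean ends up close to zero.
    let v = normalize_pixel(123, 0);
    assert!(v.abs() < 0.01);
    println!("normalized: {v}");
}
```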
// TODO: Add an offline mode. #[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::{Error as E, Result}; use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use clap::Parser; use hf_hub::{api::sync::Api, Repo, RepoType}; use tokenizers::Tokenizer; use candle_transformers::models::falcon::{Config, Falcon}; struct TextGeneration { model: Falcon, device: Device, tokenizer: Tokenizer, logits_processor: LogitsProcessor, repeat_penalty: f32, repeat_last_n: usize, } struct GenerationOptions { temp: Option<f64>, top_p: Option<f64>, repeat_penalty: f32, repeat_last_n: usize, } impl TextGeneration { fn new( model: Falcon, tokenizer: Tokenizer, generation_options: GenerationOptions, seed: u64, device: &Device, ) -> Self { let logits_processor = LogitsProcessor::new(seed, generation_options.temp, generation_options.top_p); let repeat_penalty = generation_options.repeat_penalty; let repeat_last_n = generation_options.repeat_last_n; Self { model, tokenizer, logits_processor, device: device.clone(), repeat_penalty, repeat_last_n, } } fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> { println!("starting the inference loop"); let mut tokens = self .tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let mut new_tokens = vec![]; let start_gen = std::time::Instant::now(); for index in 0..sample_len { let start_gen = std::time::Instant::now(); let context_size = if self.model.config().use_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?; let logits = self.model.forward(&input)?; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits = if self.repeat_penalty == 1. { logits } else { let start_at = tokens.len().saturating_sub(self.repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; tokens.push(next_token); new_tokens.push(next_token); println!("> {:?}", start_gen.elapsed()); println!( "{} token: {} '{}'", index + 1, next_token, self.tokenizer.decode(&[next_token], true).map_err(E::msg)? ); } let dt = start_gen.elapsed(); println!( "{sample_len} tokens generated ({} token/s)\n----\n{}\n----", sample_len as f64 / dt.as_secs_f64(), self.tokenizer.decode(&new_tokens, true).map_err(E::msg)? ); Ok(()) } } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(long)] prompt: String, /// Use f32 computations rather than bf16. #[arg(long)] use_f32: bool, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, default_value_t = 100)] sample_len: usize, #[arg(long, default_value = "tiiuae/falcon-7b")] model_id: String, #[arg(long, default_value = "refs/pr/43")] revision: String, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.0)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. 
#[arg(long, default_value_t = 64)] repeat_last_n: usize, } fn main() -> Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let start = std::time::Instant::now(); let api = Api::new()?; let repo = api.repo(Repo::with_revision( args.model_id, RepoType::Model, args.revision, )); let tokenizer_filename = repo.get("tokenizer.json")?; let filenames = candle_examples::hub_load_safetensors(&repo, "model.safetensors.index.json")?; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let dtype = if args.use_f32 { DType::F32 } else { DType::BF16 }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; let config = Config::falcon7b(); config.validate()?; let model = Falcon::load(vb, config)?; println!("loaded the model in {:?}", start.elapsed()); let generation_options = GenerationOptions { temp: args.temperature, top_p: args.top_p, repeat_penalty: args.repeat_penalty, repeat_last_n: args.repeat_last_n, }; let mut pipeline = TextGeneration::new(model, tokenizer, generation_options, args.seed, &device); pipeline.run(&args.prompt, args.sample_len)?; Ok(()) }
candle/candle-examples/examples/falcon/main.rs/0
{ "file_path": "candle/candle-examples/examples/falcon/main.rs", "repo_id": "candle", "token_count": 2723 }
27
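The generation loop above divides recent-token logits through `apply_repeat_penalty`, which conceptually follows the CTRL-style formulation: positive logits are divided by the penalty, negative logits multiplied, so repeated tokens lose probability mass either way. The sketch below is an illustrative re-implementation of that idea, not the `candle_transformers` source, which may differ in details:

```rust
// CTRL-style repeat penalty: push logits of recently seen tokens toward
// lower probability. A penalty of 1.0 leaves logits unchanged.
fn apply_repeat_penalty(logits: &mut [f32], penalty: f32, context: &[u32]) {
    for &token in context {
        let idx = token as usize;
        if idx < logits.len() {
            let l = logits[idx];
            logits[idx] = if l >= 0.0 { l / penalty } else { l * penalty };
        }
    }
}

fn main() {
    let mut logits = vec![2.0, -1.0, 0.5];
    // Penalize tokens 0 and 1, which appeared in the recent context.
    apply_repeat_penalty(&mut logits, 1.5, &[0, 1]);
    assert_eq!(logits, vec![2.0 / 1.5, -1.5, 0.5]);
}
```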
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle_transformers::models::jina_bert::{BertModel, Config, PositionEmbeddingType}; use anyhow::Error as E; use candle::{DType, Module, Tensor}; use candle_nn::VarBuilder; use clap::Parser; #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// When set, compute embeddings for this prompt. #[arg(long)] prompt: Option<String>, /// The number of times to run the prompt. #[arg(long, default_value = "1")] n: usize, /// L2 normalization for embeddings. #[arg(long, default_value = "true")] normalize_embeddings: bool, #[arg(long)] tokenizer: Option<String>, #[arg(long)] model: Option<String>, #[arg(long)] model_file: Option<String>, } impl Args { fn build_model_and_tokenizer(&self) -> anyhow::Result<(BertModel, tokenizers::Tokenizer)> { use hf_hub::{api::sync::Api, Repo, RepoType}; let model_name = match self.model.as_ref() { Some(model) => model.to_string(), None => "jinaai/jina-embeddings-v2-base-en".to_string(), }; let model = match &self.model_file { Some(model_file) => std::path::PathBuf::from(model_file), None => Api::new()? .repo(Repo::new(model_name.to_string(), RepoType::Model)) .get("model.safetensors")?, }; let tokenizer = match &self.tokenizer { Some(file) => std::path::PathBuf::from(file), None => Api::new()? .repo(Repo::new(model_name.to_string(), RepoType::Model)) .get("tokenizer.json")?, }; let device = candle_examples::device(self.cpu)?; let tokenizer = tokenizers::Tokenizer::from_file(tokenizer).map_err(E::msg)?; let config = Config::new( tokenizer.get_vocab_size(true), 768, 12, 12, 3072, candle_nn::Activation::Gelu, 8192, 2, 0.02, 1e-12, 0, PositionEmbeddingType::Alibi, ); let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? }; let model = BertModel::new(vb, &config)?; Ok((model, tokenizer)) } } fn main() -> anyhow::Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { println!("tracing..."); let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; let start = std::time::Instant::now(); let (model, mut tokenizer) = args.build_model_and_tokenizer()?; let device = &model.device; if let Some(prompt) = args.prompt { let tokenizer = tokenizer .with_padding(None) .with_truncation(None) .map_err(E::msg)?; let tokens = tokenizer .encode(prompt, true) .map_err(E::msg)? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; println!("Loaded and encoded {:?}", start.elapsed()); let start = std::time::Instant::now(); let embeddings = model.forward(&token_ids)?; let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; println!("pooled_embeddings: {embeddings}"); let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)?
} else { embeddings }; if args.normalize_embeddings { println!("normalized_embeddings: {embeddings}"); } println!("Took {:?}", start.elapsed()); } else { let sentences = [ "The cat sits outside", "A man is playing guitar", "I love pasta", "The new movie is awesome", "The cat plays in the garden", "A woman watches TV", "The new movie is so great", "Do you like pizza?", ]; let n_sentences = sentences.len(); if let Some(pp) = tokenizer.get_padding_mut() { pp.strategy = tokenizers::PaddingStrategy::BatchLongest } else { let pp = tokenizers::PaddingParams { strategy: tokenizers::PaddingStrategy::BatchLongest, ..Default::default() }; tokenizer.with_padding(Some(pp)); } let tokens = tokenizer .encode_batch(sentences.to_vec(), true) .map_err(E::msg)?; let token_ids = tokens .iter() .map(|tokens| { let tokens = tokens.get_ids().to_vec(); Tensor::new(tokens.as_slice(), device) }) .collect::<candle::Result<Vec<_>>>()?; let token_ids = Tensor::stack(&token_ids, 0)?; println!("running inference on batch {:?}", token_ids.shape()); let embeddings = model.forward(&token_ids)?; println!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if args.normalize_embeddings { normalize_l2(&embeddings)? } else { embeddings }; println!("pooled embeddings {:?}", embeddings.shape()); let mut similarities = vec![]; for i in 0..n_sentences { let e_i = embeddings.get(i)?; for j in (i + 1)..n_sentences { let e_j = embeddings.get(j)?; let sum_ij = (&e_i * &e_j)?.sum_all()?.to_scalar::<f32>()?; let sum_i2 = (&e_i * &e_i)?.sum_all()?.to_scalar::<f32>()?; let sum_j2 = (&e_j * &e_j)?.sum_all()?.to_scalar::<f32>()?; let cosine_similarity = sum_ij / (sum_i2 * sum_j2).sqrt(); similarities.push((cosine_similarity, i, j)) } } similarities.sort_by(|u, v| v.0.total_cmp(&u.0)); for &(score, i, j) in similarities[..5].iter() { println!("score: {score:.2} '{}' '{}'", sentences[i], sentences[j]) } } Ok(()) } pub fn normalize_l2(v: &Tensor) -> candle::Result<Tensor> { v.broadcast_div(&v.sqr()?.sum_keepdim(1)?.sqrt()?) }
candle/candle-examples/examples/jina-bert/main.rs/0
{ "file_path": "candle/candle-examples/examples/jina-bert/main.rs", "repo_id": "candle", "token_count": 3414 }
28
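The example above mean-pools token embeddings and optionally L2-normalizes them, after which a plain dot product between two sentence embeddings equals their cosine similarity. A plain-slice sketch of that pooling and normalization math (hypothetical helpers mirroring the tensor ops in the example):

```rust
// Mean-pool over the token axis: each sentence embedding is the average
// of its per-token embeddings.
fn mean_pool(token_embeddings: &[Vec<f32>]) -> Vec<f32> {
    let n = token_embeddings.len() as f32;
    let dim = token_embeddings[0].len();
    let mut pooled = vec![0.0; dim];
    for token in token_embeddings {
        for (p, v) in pooled.iter_mut().zip(token) {
            *p += *v / n;
        }
    }
    pooled
}

// L2-normalize in place so that dot products become cosine similarities.
fn normalize_l2(v: &mut [f32]) {
    let norm = v.iter().map(|x| x * x).sum::<f32>().sqrt();
    for x in v.iter_mut() {
        *x /= norm;
    }
}

fn main() {
    let tokens = vec![vec![1.0, 0.0], vec![0.0, 1.0]];
    let mut e = mean_pool(&tokens);
    normalize_l2(&mut e);
    // [0.5, 0.5] normalized to unit length is [~0.7071, ~0.7071].
    assert!((e[0] - 0.7071).abs() < 1e-3 && (e[1] - 0.7071).abs() < 1e-3);
}
```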
# candle-marian-mt

`marian-mt` is a neural machine translation model. In this example it is used to translate text from French to English. See the associated [model card](https://huggingface.co/Helsinki-NLP/opus-mt-tc-big-fr-en) for details on the model itself.

## Running an example

```bash
cargo run --example marian-mt --release -- \
  --text "Demain, dès l'aube, à l'heure où blanchit la campagne, Je partirai. Vois-tu, je sais que tu m'attends. J'irai par la forêt, j'irai par la montagne. Je ne puis demeurer loin de toi plus longtemps."
```

```
<NIL> Tomorrow, at dawn, at the time when the country is whitening, I will go. See, I know you are waiting for me. I will go through the forest, I will go through the mountain. I cannot stay far from you any longer.</s>
```

## Generating the tokenizer.json files

You can use the following script to generate the `tokenizer.json` config files from the hf-hub repos. This requires the `tokenizers` and `sentencepiece` packages to be installed, and uses the `convert_slow_tokenizer.py` script from this directory.

```python
from convert_slow_tokenizer import MarianConverter
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-fr-en", use_fast=False)
fast_tokenizer = MarianConverter(tokenizer, index=0).converted()
fast_tokenizer.save(f"tokenizer-marian-base-fr.json")
fast_tokenizer = MarianConverter(tokenizer, index=1).converted()
fast_tokenizer.save(f"tokenizer-marian-base-en.json")
```
candle/candle-examples/examples/marian-mt/README.md/0
{ "file_path": "candle/candle-examples/examples/marian-mt/README.md", "repo_id": "candle", "token_count": 497 }
29
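Once the two tokenizer files exist, they can be loaded from Rust with the `tokenizers` crate. A hypothetical sketch, assuming the file names produced by the Python snippet above sit in the working directory:

```rust
use tokenizers::Tokenizer;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Source-language (French) and target-language (English) tokenizers,
    // as generated by the conversion script above.
    let src_tokenizer = Tokenizer::from_file("tokenizer-marian-base-fr.json")?;
    let _tgt_tokenizer = Tokenizer::from_file("tokenizer-marian-base-en.json")?;
    let encoded = src_tokenizer.encode("Demain, dès l'aube.", true)?;
    println!("{:?}", encoded.get_ids());
    Ok(())
}
```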
# candle-moondream

[Moondream](https://github.com/vikhyat/moondream) is a computer-vision model that can answer real-world questions about images. It's tiny by today's standards, with only 1.6B parameters. That enables it to run on a variety of devices, including mobile phones and edge devices.

## Running some examples

First download an example image

```bash
$ wget https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg
```

<img src="https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg" width="200">

Now you can run Moondream from the `candle-examples` crate:

```bash
$ cargo run --example moondream --release -- --prompt "What is the girl eating?" --image "./demo-1.jpg"

avavx: false, neon: true, simd128: false, f16c: false
temp: 0.00 repeat-penalty: 1.00 repeat-last-n: 64
retrieved the files in 3.395583ms
Running on CPU, to run on GPU(metal), build this example with `--features metal`
loaded the model in 5.485493792s
loaded and encoded the image Tensor[dims 3, 378, 378; f32] in 4.801396417s
starting the inference loop
The girl is eating a hamburger.<
9 tokens generated (0.68 token/s)
```
candle/candle-examples/examples/moondream/README.md/0
{ "file_path": "candle/candle-examples/examples/moondream/README.md", "repo_id": "candle", "token_count": 367 }
30
use super::gym_env::{GymEnv, Step}; use candle::{DType, Device, Error, Module, Result, Tensor}; use candle_nn::{ linear, ops::log_softmax, ops::softmax, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, VarBuilder, VarMap, }; use rand::{distributions::Distribution, rngs::ThreadRng, Rng}; fn new_model( input_shape: &[usize], num_actions: usize, dtype: DType, device: &Device, ) -> Result<(impl Module, VarMap)> { let input_size = input_shape.iter().product(); let mut varmap = VarMap::new(); let var_builder = VarBuilder::from_varmap(&varmap, dtype, device); let model = seq() .add(linear(input_size, 32, var_builder.pp("lin1"))?) .add(Activation::Relu) .add(linear(32, num_actions, var_builder.pp("lin2"))?); Ok((model, varmap)) } fn accumulate_rewards(steps: &[Step<i64>]) -> Vec<f64> { let mut rewards: Vec<f64> = steps.iter().map(|s| s.reward).collect(); let mut acc_reward = 0f64; for (i, reward) in rewards.iter_mut().enumerate().rev() { if steps[i].terminated { acc_reward = 0.0; } acc_reward += *reward; *reward = acc_reward; } rewards } fn weighted_sample(probs: Vec<f32>, rng: &mut ThreadRng) -> Result<usize> { let distribution = rand::distributions::WeightedIndex::new(probs).map_err(Error::wrap)?; let mut rng = rng; Ok(distribution.sample(&mut rng)) } pub fn run() -> Result<()> { let env = GymEnv::new("CartPole-v1")?; println!("action space: {:?}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let (model, varmap) = new_model( env.observation_space(), env.action_space(), DType::F32, &Device::Cpu, )?; let optimizer_params = ParamsAdamW { lr: 0.01, weight_decay: 0.01, ..Default::default() }; let mut optimizer = AdamW::new(varmap.all_vars(), optimizer_params)?; let mut rng = rand::thread_rng(); for epoch_idx in 0..100 { let mut state = env.reset(rng.gen::<u64>())?; let mut steps: Vec<Step<i64>> = vec![]; loop { let action = { let action_probs: Vec<f32> = softmax(&model.forward(&state.detach().unsqueeze(0)?)?, 1)? .squeeze(0)? .to_vec1()?; weighted_sample(action_probs, &mut rng)? as i64 }; let step = env.step(action)?; steps.push(step.copy_with_obs(&state)); if step.terminated || step.truncated { state = env.reset(rng.gen::<u64>())?; if steps.len() > 5000 { break; } } else { state = step.state; } } let total_reward: f64 = steps.iter().map(|s| s.reward).sum(); let episodes: i64 = steps .iter() .map(|s| (s.terminated || s.truncated) as i64) .sum(); println!( "epoch: {:<3} episodes: {:<5} avg reward per episode: {:.2}", epoch_idx, episodes, total_reward / episodes as f64 ); let batch_size = steps.len(); let rewards = Tensor::from_vec(accumulate_rewards(&steps), batch_size, &Device::Cpu)? .to_dtype(DType::F32)? .detach(); let actions_mask = { let actions: Vec<i64> = steps.iter().map(|s| s.action).collect(); let actions_mask: Vec<Tensor> = actions .iter() .map(|&action| { // One-hot encoding let mut action_mask = vec![0.0; env.action_space()]; action_mask[action as usize] = 1.0; Tensor::from_vec(action_mask, env.action_space(), &Device::Cpu) .unwrap() .to_dtype(DType::F32) .unwrap() }) .collect(); Tensor::stack(&actions_mask, 0)?.detach() }; let states = { let states: Vec<Tensor> = steps.into_iter().map(|s| s.state).collect(); Tensor::stack(&states, 0)?.detach() }; let log_probs = actions_mask .mul(&log_softmax(&model.forward(&states)?, 1)?)? .sum(1)?; let loss = rewards.mul(&log_probs)?.neg()?.mean_all()?; optimizer.backward_step(&loss)?; } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs", "repo_id": "candle", "token_count": 2333 }
31
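The policy-gradient loss above weights each action's log-probability by the reward-to-go of its episode suffix, computed by `accumulate_rewards` with a backward scan that resets at terminal steps. A toy sketch of that scan (standalone re-statement of the same logic, checked on a two-episode batch):

```rust
// Backward reward-to-go scan: sum rewards from the end of the batch,
// resetting the accumulator whenever a step is marked terminated.
fn rewards_to_go(rewards: &[f64], terminated: &[bool]) -> Vec<f64> {
    let mut out = rewards.to_vec();
    let mut acc = 0.0;
    for i in (0..out.len()).rev() {
        if terminated[i] {
            acc = 0.0;
        }
        acc += out[i];
        out[i] = acc;
    }
    out
}

fn main() {
    // Two episodes: [1, 1] (terminates at index 1) and [1] (still running).
    let r = rewards_to_go(&[1.0, 1.0, 1.0], &[false, true, false]);
    // Step 0 sees its own reward plus the terminal step's; the terminal
    // step and the new episode's first step each start fresh.
    assert_eq!(r, vec![2.0, 1.0, 1.0]);
}
```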
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Error as E; use clap::{Parser, ValueEnum}; use candle::{DType, Tensor}; use candle_examples::token_output_stream::TokenOutputStream; use candle_nn::VarBuilder; use candle_transformers::models::{trocr, vit}; use tokenizers::Tokenizer; mod image_processor; #[derive(Clone, Debug, Copy, ValueEnum)] enum Which { #[value(name = "base")] BaseHandwritten, #[value(name = "large")] LargeHandwritten, BasePrinted, LargePrinted, } impl Which { fn repo_and_branch_name(&self) -> (&str, &str) { match self { Self::BaseHandwritten => ("microsoft/trocr-base-handwritten", "refs/pr/3"), Self::LargeHandwritten => ("microsoft/trocr-large-handwritten", "refs/pr/6"), Self::BasePrinted => ("microsoft/trocr-base-printed", "refs/pr/7"), Self::LargePrinted => ("microsoft/trocr-large-printed", "main"), } } } #[derive(Debug, Clone, serde::Deserialize)] struct Config { encoder: vit::Config, decoder: trocr::TrOCRConfig, } #[derive(Parser, Debug)] struct Args { #[arg(long)] model: Option<String>, /// Choose the variant of the model to run. #[arg(long, default_value = "base")] which: Which, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// The image file to be processed. #[arg(long)] image: String, /// Tokenization config. #[arg(long)] tokenizer: Option<String>, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let api = hf_hub::api::sync::Api::new()?; let mut tokenizer_dec = { let tokenizer_file = match args.tokenizer { None => api .model(String::from("ToluClassics/candle-trocr-tokenizer")) .get("tokenizer.json")?, Some(tokenizer) => std::path::PathBuf::from(tokenizer), }; let tokenizer = Tokenizer::from_file(&tokenizer_file).map_err(E::msg)?; TokenOutputStream::new(tokenizer) }; let device = candle_examples::device(args.cpu)?; let vb = { let model = match args.model { Some(model) => std::path::PathBuf::from(model), None => { let (repo, branch) = args.which.repo_and_branch_name(); api.repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, branch.to_string(), )) .get("model.safetensors")? } }; println!("model: {:?}", model); unsafe { VarBuilder::from_mmaped_safetensors(&[model], DType::F32, &device)? 
} }; let (encoder_config, decoder_config) = { let (repo, branch) = args.which.repo_and_branch_name(); let config_filename = api .repo(hf_hub::Repo::with_revision( repo.to_string(), hf_hub::RepoType::Model, branch.to_string(), )) .get("config.json")?; let config: Config = serde_json::from_reader(std::fs::File::open(config_filename)?)?; (config.encoder, config.decoder) }; let mut model = trocr::TrOCRModel::new(&encoder_config, &decoder_config, vb)?; let processor_config = image_processor::ProcessorConfig::default(); let processor = image_processor::ViTImageProcessor::new(&processor_config); let image = vec![args.image.as_str()]; let image = processor.preprocess(image)?.to_device(&device)?; let encoder_xs = model.encoder().forward(&image)?; let mut logits_processor = candle_transformers::generation::LogitsProcessor::new(1337, None, None); let mut token_ids: Vec<u32> = vec![decoder_config.decoder_start_token_id]; for index in 0..1000 { let context_size = if index >= 1 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = model.decode(&input_ids, &encoder_xs, start_pos)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; token_ids.push(token); if let Some(t) = tokenizer_dec.next_token(token)? { use std::io::Write; print!("{t}"); std::io::stdout().flush()?; } if token == decoder_config.eos_token_id { break; } } if let Some(rest) = tokenizer_dec.decode_rest().map_err(E::msg)? { print!("{rest}"); } println!(); Ok(()) }
candle/candle-examples/examples/trocr/main.rs/0
{ "file_path": "candle/candle-examples/examples/trocr/main.rs", "repo_id": "candle", "token_count": 2167 }
32
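Both the BLIP and TrOCR decoding loops use the same incremental schedule: the first step feeds the whole token prefix, and each later step feeds only the newest token, relying on the model's cached key/value state for the rest. A sketch of that context-window selection (hypothetical helper extracted from the loop bodies above):

```rust
// First step: feed the full prefix. Later steps: feed only the last token.
fn context_window(token_ids: &[u32], index: usize) -> &[u32] {
    let context_size = if index >= 1 { 1 } else { token_ids.len() };
    &token_ids[token_ids.len().saturating_sub(context_size)..]
}

fn main() {
    let tokens = vec![101u32, 7, 9];
    assert_eq!(context_window(&tokens, 0), &[101, 7, 9]);
    assert_eq!(context_window(&tokens, 1), &[9]);
}
```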
// Copied from https://github.com/ruuda/bs1770/blob/master/src/lib.rs // BS1770 -- Loudness analysis library conforming to ITU-R BS.1770 // Copyright 2020 Ruud van Asseldonk // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // A copy of the License has been included in the root of the repository. //! Loudness analysis conforming to [ITU-R BS.1770-4][bs17704]. //! //! This library offers the building blocks to perform BS.1770 loudness //! measurements, but you need to put the pieces together yourself. //! //! [bs17704]: https://www.itu.int/rec/R-REC-BS.1770-4-201510-I/en //! //! # Stereo integrated loudness example //! //! ```ignore //! # fn load_stereo_audio() -> [Vec<i16>; 2] { //! # [vec![0; 48_000], vec![0; 48_000]] //! # } //! # //! let sample_rate_hz = 44_100; //! let bits_per_sample = 16; //! let channel_samples: [Vec<i16>; 2] = load_stereo_audio(); //! //! // When converting integer samples to float, note that the maximum amplitude //! // is `1 << (bits_per_sample - 1)`, one bit is the sign bit. //! let normalizer = 1.0 / (1_u64 << (bits_per_sample - 1)) as f32; //! //! let channel_power: Vec<_> = channel_samples.iter().map(|samples| { //! let mut meter = bs1770::ChannelLoudnessMeter::new(sample_rate_hz); //! meter.push(samples.iter().map(|&s| s as f32 * normalizer)); //! meter.into_100ms_windows() //! }).collect(); //! //! let stereo_power = bs1770::reduce_stereo( //! channel_power[0].as_ref(), //! channel_power[1].as_ref(), //! ); //! //! let gated_power = bs1770::gated_mean( //! stereo_power.as_ref() //! ).unwrap_or(bs1770::Power(0.0)); //! println!("Integrated loudness: {:.1} LUFS", gated_power.loudness_lkfs()); //! ``` use std::f32; /// Coefficients for a 2nd-degree infinite impulse response filter. /// /// Coefficient a0 is implicitly 1.0. #[derive(Clone)] struct Filter { a1: f32, a2: f32, b0: f32, b1: f32, b2: f32, // The past two input and output samples. x1: f32, x2: f32, y1: f32, y2: f32, } impl Filter { /// Stage 1 of the BS.1770-4 pre-filter. pub fn high_shelf(sample_rate_hz: f32) -> Filter { // Coefficients taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/meter.py#L135-L136. let gain_db = 3.999_843_8; let q = 0.707_175_25; let center_hz = 1_681.974_5; // Formula taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/iirfilter.py#L134-L143. let k = (f32::consts::PI * center_hz / sample_rate_hz).tan(); let vh = 10.0_f32.powf(gain_db / 20.0); let vb = vh.powf(0.499_666_78); let a0 = 1.0 + k / q + k * k; Filter { b0: (vh + vb * k / q + k * k) / a0, b1: 2.0 * (k * k - vh) / a0, b2: (vh - vb * k / q + k * k) / a0, a1: 2.0 * (k * k - 1.0) / a0, a2: (1.0 - k / q + k * k) / a0, x1: 0.0, x2: 0.0, y1: 0.0, y2: 0.0, } } /// Stage 2 of the BS.1770-4 pre-filter. pub fn high_pass(sample_rate_hz: f32) -> Filter { // Coefficients taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/meter.py#L135-L136.
let q = 0.500_327_05; let center_hz = 38.135_47; // Formula taken from https://github.com/csteinmetz1/pyloudnorm/blob/ // 6baa64d59b7794bc812e124438692e7fd2e65c0c/pyloudnorm/iirfilter.py#L145-L151 let k = (f32::consts::PI * center_hz / sample_rate_hz).tan(); Filter { a1: 2.0 * (k * k - 1.0) / (1.0 + k / q + k * k), a2: (1.0 - k / q + k * k) / (1.0 + k / q + k * k), b0: 1.0, b1: -2.0, b2: 1.0, x1: 0.0, x2: 0.0, y1: 0.0, y2: 0.0, } } /// Feed the next input sample, get the next output sample. #[inline(always)] pub fn apply(&mut self, x0: f32) -> f32 { let y0 = 0.0 + self.b0 * x0 + self.b1 * self.x1 + self.b2 * self.x2 - self.a1 * self.y1 - self.a2 * self.y2; self.x2 = self.x1; self.x1 = x0; self.y2 = self.y1; self.y1 = y0; y0 } } /// Compensated sum, for summing many values of different orders of magnitude /// accurately. #[derive(Copy, Clone, PartialEq)] struct Sum { sum: f32, residue: f32, } impl Sum { #[inline(always)] fn zero() -> Sum { Sum { sum: 0.0, residue: 0.0, } } #[inline(always)] fn add(&mut self, x: f32) { let sum = self.sum + (self.residue + x); self.residue = (self.residue + x) - (sum - self.sum); self.sum = sum; } } /// The mean of the squares of the K-weighted samples in a window of time. /// /// K-weighted power is equivalent to K-weighted loudness, the only difference /// is one of scale: power is quadratic in sample amplitudes, whereas loudness /// units are logarithmic. `loudness_lkfs` and `from_lkfs` convert between power, /// and K-weighted Loudness Units relative to nominal Full Scale (LKFS). /// /// The term “LKFS” (Loudness Units, K-Weighted, relative to nominal Full Scale) /// is used in BS.1770-4 to emphasize K-weighting, but the term is otherwise /// interchangeable with the more widespread term “LUFS” (Loudness Units, /// relative to Full Scale). Loudness units are related to decibels in the /// following sense: boosting a signal that has a loudness of /// -<var>L<sub>K</sub></var> LUFS by <var>L<sub>K</sub></var> dB (by /// multiplying the amplitude by 10<sup><var>L<sub>K</sub></var>/20</sup>) will /// bring the loudness to 0 LUFS. /// /// K-weighting refers to a high-shelf and high-pass filter that model the /// effect that humans perceive a certain amount of power in low frequencies to /// be less loud than the same amount of power in higher frequencies. In this /// library the `Power` type is used exclusively to refer to power after applying K-weighting. /// /// The nominal “full scale” is the range [-1.0, 1.0]. Because the power is the /// mean square of the samples, if no input samples exceeded the full scale, the /// power will be in the range [0.0, 1.0]. However, the power delivered by /// multiple channels, which is a weighted sum over individual channel powers, /// can exceed this range, because the weighted sum is not normalized. #[derive(Copy, Clone, PartialEq, PartialOrd)] pub struct Power(pub f32); impl Power { /// Convert Loudness Units relative to Full Scale into a squared sample amplitude. /// /// This is the inverse of `loudness_lkfs`. pub fn from_lkfs(lkfs: f32) -> Power { // The inverse of the formula below. Power(10.0_f32.powf((lkfs + 0.691) * 0.1)) } /// Return the loudness of this window in Loudness Units, K-weighted, relative to Full Scale. /// /// This is the inverse of `from_lkfs`. pub fn loudness_lkfs(&self) -> f32 { // Equation 2 (p.5) of BS.1770-4. -0.691 + 10.0 * self.0.log10() } } /// A `T` value for non-overlapping windows of audio, 100ms in length. 
/// /// The `ChannelLoudnessMeter` applies K-weighting and then produces the power /// for non-overlapping windows of 100ms duration. /// /// These non-overlapping 100ms windows can later be combined into overlapping /// windows of 400ms, spaced 100ms apart, to compute instantaneous loudness or /// to perform a gated measurement, or they can be combined into even larger /// windows for a momentary loudness measurement. #[derive(Copy, Clone, Debug)] pub struct Windows100ms<T> { pub inner: T, } impl<T> Windows100ms<T> { /// Wrap a new empty vector. pub fn new() -> Windows100ms<Vec<T>> { Windows100ms { inner: Vec::new() } } /// Apply `as_ref` to the inner value. pub fn as_ref(&self) -> Windows100ms<&[Power]> where T: AsRef<[Power]>, { Windows100ms { inner: self.inner.as_ref(), } } /// Apply `as_mut` to the inner value. pub fn as_mut(&mut self) -> Windows100ms<&mut [Power]> where T: AsMut<[Power]>, { Windows100ms { inner: self.inner.as_mut(), } } #[allow(clippy::len_without_is_empty)] /// Apply `len` to the inner value. pub fn len(&self) -> usize where T: AsRef<[Power]>, { self.inner.as_ref().len() } } /// Measures K-weighted power of non-overlapping 100ms windows of a single channel of audio. /// /// # Output /// /// The output of the meter is an intermediate result in the form of power for /// 100ms non-overlapping windows. The windows need to be processed further to /// get one of the instantaneous, momentary, and integrated loudness /// measurements defined in BS.1770. /// /// The windows can also be inspected directly; the data is meaningful /// on its own (the K-weighted power delivered in that window of time), but it /// is not something that BS.1770 defines a term for. /// /// # Multichannel audio /// /// To perform a loudness measurement of multichannel audio, construct a /// `ChannelLoudnessMeter` per channel, and later combine the measured power /// with e.g. `reduce_stereo`. /// /// # Instantaneous loudness /// /// The instantaneous loudness is the power over a 400ms window, so you can /// average four 100ms windows. No special functionality is implemented to help /// with that at this time. ([Pull requests would be accepted.][contribute]) /// /// # Momentary loudness /// /// The momentary loudness is the power over a 3-second window, so you can /// average thirty 100ms windows. No special functionality is implemented to /// help with that at this time. ([Pull requests would be accepted.][contribute]) /// /// # Integrated loudness /// /// Use `gated_mean` to perform an integrated loudness measurement: /// /// ```ignore /// # use std::iter; /// # use bs1770::{ChannelLoudnessMeter, gated_mean}; /// # let sample_rate_hz = 44_100; /// # let samples_per_100ms = sample_rate_hz / 10; /// # let mut meter = ChannelLoudnessMeter::new(sample_rate_hz); /// # meter.push((0..44_100).map(|i| (i as f32 * 0.01).sin())); /// let integrated_loudness_lkfs = gated_mean(meter.as_100ms_windows()) /// .unwrap_or(bs1770::Power(0.0)) /// .loudness_lkfs(); /// ``` /// /// [contribute]: https://github.com/ruuda/bs1770/blob/master/CONTRIBUTING.md #[derive(Clone)] pub struct ChannelLoudnessMeter { /// The number of samples that fit in 100ms of audio. samples_per_100ms: u32, /// Stage 1 filter (head effects, high shelf). filter_stage1: Filter, /// Stage 2 filter (high-pass). filter_stage2: Filter, /// Sum of the squares over non-overlapping windows of 100ms. windows: Windows100ms<Vec<Power>>, /// The number of samples in the current unfinished window. 
count: u32, /// The sum of the squares of the samples in the current unfinished window. square_sum: Sum, } impl ChannelLoudnessMeter { /// Construct a new loudness meter for the given sample rate. pub fn new(sample_rate_hz: u32) -> ChannelLoudnessMeter { ChannelLoudnessMeter { samples_per_100ms: sample_rate_hz / 10, filter_stage1: Filter::high_shelf(sample_rate_hz as f32), filter_stage2: Filter::high_pass(sample_rate_hz as f32), windows: Windows100ms::new(), count: 0, square_sum: Sum::zero(), } } /// Feed input samples for loudness analysis. /// /// # Full scale /// /// Full scale for the input samples is the interval [-1.0, 1.0]. If your /// input consists of signed integer samples, you can convert as follows: /// /// ```ignore /// # let mut meter = bs1770::ChannelLoudnessMeter::new(44_100); /// # let bits_per_sample = 16_usize; /// # let samples = &[0_i16]; /// // Note that the maximum amplitude is `1 << (bits_per_sample - 1)`, /// // one bit is the sign bit. /// let normalizer = 1.0 / (1_u64 << (bits_per_sample - 1)) as f32; /// meter.push(samples.iter().map(|&s| s as f32 * normalizer)); /// ``` /// /// # Repeated calls /// /// You can call `push` multiple times to feed multiple batches of samples. /// This is equivalent to feeding a single chained iterator. The leftover of /// samples that did not fill a full 100ms window is not discarded: /// /// ```ignore /// # use std::iter; /// # use bs1770::ChannelLoudnessMeter; /// let sample_rate_hz = 44_100; /// let samples_per_100ms = sample_rate_hz / 10; /// let mut meter = ChannelLoudnessMeter::new(sample_rate_hz); /// /// meter.push(iter::repeat(0.0).take(samples_per_100ms as usize - 1)); /// assert_eq!(meter.as_100ms_windows().len(), 0); /// /// meter.push(iter::once(0.0)); /// assert_eq!(meter.as_100ms_windows().len(), 1); /// ``` pub fn push<I: Iterator<Item = f32>>(&mut self, samples: I) { let normalizer = 1.0 / self.samples_per_100ms as f32; // LLVM, if you could go ahead and inline those apply calls, and then // unroll and vectorize the loop, that'd be terrific. for x in samples { let y = self.filter_stage1.apply(x); let z = self.filter_stage2.apply(y); self.square_sum.add(z * z); self.count += 1; // TODO: Should this branch be marked cold? if self.count == self.samples_per_100ms { let mean_squares = Power(self.square_sum.sum * normalizer); self.windows.inner.push(mean_squares); // We intentionally do not reset the residue. That way, leftover // energy from this window is not lost, so for the file overall, // the sum remains more accurate. self.square_sum.sum = 0.0; self.count = 0; } } } /// Return a reference to the 100ms windows analyzed so far. pub fn as_100ms_windows(&self) -> Windows100ms<&[Power]> { self.windows.as_ref() } /// Return all 100ms windows analyzed so far. pub fn into_100ms_windows(self) -> Windows100ms<Vec<Power>> { self.windows } } /// Combine power for multiple channels by taking a weighted sum. /// /// Note that BS.1770-4 defines power for a multi-channel signal as a weighted /// sum over channels which is not normalized. This means that a stereo signal /// is inherently louder than a mono signal. For a mono signal played back on /// stereo speakers, you should therefore still apply `reduce_stereo`, passing /// in the same signal for both channels. pub fn reduce_stereo( left: Windows100ms<&[Power]>, right: Windows100ms<&[Power]>, ) -> Windows100ms<Vec<Power>> { assert_eq!( left.len(), right.len(), "Channels must have the same length." 
); let mut result = Vec::with_capacity(left.len()); for (l, r) in left.inner.iter().zip(right.inner) { result.push(Power(l.0 + r.0)); } Windows100ms { inner: result } } /// In-place version of `reduce_stereo` that stores the result in the former left channel. pub fn reduce_stereo_in_place(left: Windows100ms<&mut [Power]>, right: Windows100ms<&[Power]>) { assert_eq!( left.len(), right.len(), "Channels must have the same length." ); for (l, r) in left.inner.iter_mut().zip(right.inner) { l.0 += r.0; } } /// Perform gating and averaging for a BS.1770-4 integrated loudness measurement. /// /// The integrated loudness measurement is not just the average power over the /// entire signal. BS.1770-4 defines two stages of gating that exclude /// parts of the signal, to ensure that silent parts do not contribute to the /// loudness measurement. This function performs that gating, and returns the /// average power over the windows that were not excluded. /// /// The result of this function is the integrated loudness measurement. /// /// When no signal remains after applying the gate, this function returns /// `None`. In particular, this happens when all of the signal is softer than /// -70 LKFS, including a signal that consists of pure silence. pub fn gated_mean(windows_100ms: Windows100ms<&[Power]>) -> Option<Power> { let mut gating_blocks = Vec::with_capacity(windows_100ms.len()); // Stage 1: an absolute threshold of -70 LKFS. (Equation 6, p.6.) let absolute_threshold = Power::from_lkfs(-70.0); // Iterate over all 400ms windows. for window in windows_100ms.inner.windows(4) { // Note that the sum over channels has already been performed at this point. let gating_block_power = Power(0.25 * window.iter().map(|mean| mean.0).sum::<f32>()); if gating_block_power > absolute_threshold { gating_blocks.push(gating_block_power); } } if gating_blocks.is_empty() { return None; } // Compute the loudness after applying the absolute gate, in order to // determine the threshold for the relative gate. let mut sum_power = Sum::zero(); for &gating_block_power in &gating_blocks { sum_power.add(gating_block_power.0); } let absolute_gated_power = Power(sum_power.sum / (gating_blocks.len() as f32)); // Stage 2: Apply the relative gate. let relative_threshold = Power::from_lkfs(absolute_gated_power.loudness_lkfs() - 10.0); let mut sum_power = Sum::zero(); let mut n_blocks = 0_usize; for &gating_block_power in &gating_blocks { if gating_block_power > relative_threshold { sum_power.add(gating_block_power.0); n_blocks += 1; } } if n_blocks == 0 { return None; } let relative_gated_power = Power(sum_power.sum / n_blocks as f32); Some(relative_gated_power) }
candle/candle-examples/src/bs1770.rs/0
{ "file_path": "candle/candle-examples/src/bs1770.rs", "repo_id": "candle", "token_count": 7220 }
33
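The gating in `gated_mean` hinges on two numeric conversions defined in `Power`: loudness = -0.691 + 10·log10(power), and its inverse. A small sketch of that arithmetic and the relative-gate threshold ("measured gated loudness minus 10 dB"), restated outside the `Power` newtype:

```rust
// Equation 2 of BS.1770-4: convert K-weighted mean-square power to LKFS.
fn loudness_lkfs(power: f32) -> f32 {
    -0.691 + 10.0 * power.log10()
}

// Inverse conversion: LKFS back to mean-square power.
fn power_from_lkfs(lkfs: f32) -> f32 {
    10f32.powf((lkfs + 0.691) * 0.1)
}

fn main() {
    // The two conversions round-trip within float tolerance.
    let p = power_from_lkfs(-23.0);
    assert!((loudness_lkfs(p) + 23.0).abs() < 1e-3);
    // The relative gate sits 10 LU below the absolutely-gated loudness,
    // so its power threshold is strictly below the measured power.
    let relative = power_from_lkfs(loudness_lkfs(p) - 10.0);
    assert!(relative < p);
}
```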
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once // #include <ATen/cuda/CUDAContext.h> #include "error.h" #include "static_switch.h" #include "flash.h" #include "flash_fwd_kernel.h" // Determine if the architecture supports FLASH and define a macro to handle parameter modifiers #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 #define ARCH_SUPPORTS_FLASH #define KERNEL_PARAM_MODIFIER __grid_constant__ #else #define KERNEL_PARAM_MODIFIER #endif // Define a macro for unsupported architecture handling to centralize the error message #define FLASH_UNSUPPORTED_ARCH printf("FATAL: FlashAttention requires building with sm version sm80-sm90, but was built for < 8.0!"); // Use a macro to clean up kernel definitions #define DEFINE_FLASH_FORWARD_KERNEL(kernelName, ...) \ template<typename Kernel_traits, __VA_ARGS__> \ __global__ void kernelName(KERNEL_PARAM_MODIFIER const Flash_fwd_params params) DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_kernel, bool Is_dropout, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Return_softmax) { #if defined(ARCH_SUPPORTS_FLASH) static_assert(!(Is_causal && Is_local)); // Enforce constraints flash::compute_attn<Kernel_traits, Is_dropout, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Return_softmax>(params); #else FLASH_UNSUPPORTED_ARCH #endif } DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_splitkv_kernel, bool Is_causal, bool Is_local, bool Has_alibi, bool Is_even_MN, bool Is_even_K, bool Is_softcap, bool Split, bool Append_KV) { #if defined(ARCH_SUPPORTS_FLASH) flash::compute_attn_splitkv<Kernel_traits, Is_causal, Is_local, Has_alibi, Is_even_MN, Is_even_K, Is_softcap, Split, Append_KV>(params); #else FLASH_UNSUPPORTED_ARCH #endif } DEFINE_FLASH_FORWARD_KERNEL(flash_fwd_splitkv_combine_kernel, int kBlockM, int Log_max_splits, bool Is_even_K) { static_assert(Log_max_splits >= 1); flash::combine_attn_seqk_parallel<Kernel_traits, kBlockM, Log_max_splits, Is_even_K>(params); } template<typename Kernel_traits, bool Is_dropout, bool Is_causal> void run_flash_fwd(Flash_fwd_params &params, cudaStream_t stream) { constexpr size_t smem_size = Kernel_traits::kSmemSize; // printf("smem_size = %d\n", smem_size); // Work-around for gcc 7. It doesn't like nested BOOL_SWITCH. // https://github.com/kokkos/kokkos-kernels/issues/349 // https://github.com/HazyResearch/flash-attention/issues/21 const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM; dim3 grid(num_m_block, params.b, params.h); const bool is_even_MN = params.cu_seqlens_q == nullptr && params.cu_seqlens_k == nullptr && params.seqlen_k % Kernel_traits::kBlockN == 0 && params.seqlen_q % Kernel_traits::kBlockM == 0; const bool is_even_K = params.d == Kernel_traits::kHeadDim; const bool return_softmax = params.p_ptr != nullptr; BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] { EVENK_SWITCH(is_even_K, IsEvenKConst, [&] { LOCAL_SWITCH((params.window_size_left >= 0 || params.window_size_right >= 0) && !Is_causal, Is_local, [&] { BOOL_SWITCH(return_softmax, ReturnSoftmaxConst, [&] { ALIBI_SWITCH(params.alibi_slopes_ptr != nullptr, Has_alibi, [&] { SOFTCAP_SWITCH(params.softcap > 0.0, Is_softcap, [&] { // Will only return softmax if dropout, to reduce compilation time. // If not IsEvenKConst, we also set IsEvenMNConst to false to reduce number of templates. 
// If return_softmax, set IsEvenMNConst to false to reduce number of templates // If head dim > 128, set IsEvenMNConst to false to reduce number of templates // If Is_local, set Is_causal to false auto kernel = &flash_fwd_kernel<Kernel_traits, Is_dropout, Is_causal, Is_local && !Is_causal, Has_alibi, IsEvenMNConst && IsEvenKConst && !Is_local && !ReturnSoftmaxConst && Kernel_traits::kHeadDim <= 128, IsEvenKConst, Is_softcap, ReturnSoftmaxConst && Is_dropout>; // auto kernel = &flash_fwd_kernel<Kernel_traits, false, Is_causal, false, false, true, true, false>; // printf("IsEvenMNConst = %d, IsEvenKConst = %d, Is_local = %d, Is_causal = %d, ReturnSoftmaxConst = %d, Is_dropout = %d\n", int(IsEvenMNConst), int(IsEvenKConst), int(Is_local), int(Is_causal), int(ReturnSoftmaxConst), int(Is_dropout)); // auto kernel = &flash_fwd_kernel<Kernel_traits, false, Is_causal, false, true, true, false>; if (smem_size >= 48 * 1024) { C10_CUDA_CHECK(cudaFuncSetAttribute( kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size)); } // int ctas_per_sm; // cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor( // &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size); // printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm); kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); }); }); }); } template<typename Kernel_traits, bool Is_causal> void run_flash_splitkv_fwd(Flash_fwd_params &params, cudaStream_t stream) { static_assert(!Kernel_traits::Is_Q_in_regs, "SplitKV implementation does not support Is_Q_in_regs"); static_assert(!Kernel_traits::Share_Q_K_smem, "SplitKV implementation does not support Share_Q_K_smem"); constexpr size_t smem_size = Kernel_traits::kSmemSize; const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM; dim3 grid(num_m_block, params.num_splits > 1 ? params.num_splits : params.b, params.num_splits > 1 ? params.b * params.h : params.h); const bool is_even_MN = params.cu_seqlens_q == nullptr && params.cu_seqlens_k == nullptr && params.seqlen_k % Kernel_traits::kBlockN == 0 && params.seqlen_q % Kernel_traits::kBlockM == 0; const bool is_even_K = params.d == Kernel_traits::kHeadDim; BOOL_SWITCH(is_even_MN, IsEvenMNConst, [&] { EVENK_SWITCH(is_even_K, IsEvenKConst, [&] { LOCAL_SWITCH((params.window_size_left >= 0 || params.window_size_right >= 0) && !Is_causal, Is_local, [&] { BOOL_SWITCH(params.num_splits > 1, Split, [&] { BOOL_SWITCH(params.knew_ptr != nullptr, Append_KV, [&] { ALIBI_SWITCH(params.alibi_slopes_ptr != nullptr, Has_alibi, [&] { SOFTCAP_SWITCH(params.softcap > 0.0, Is_softcap, [&] { // If Append_KV, then we must have seqlen_offsets, which means cu_seqlens_k != nullptr. // If not IsEvenKConst, we also set IsEvenMNConst to false to reduce number of templates. 
// If Is_local, set Is_causal to false auto kernel = &flash_fwd_splitkv_kernel<Kernel_traits, Is_causal, Is_local && !Is_causal, Has_alibi, IsEvenMNConst && !Append_KV && IsEvenKConst && !Is_local && Kernel_traits::kHeadDim <= 128, IsEvenKConst, Is_softcap, Split, Append_KV>; // auto kernel = &flash_fwd_splitkv_kernel<Kernel_traits, Is_causal, false, true, Split, Append_KV>; // auto kernel = &flash_fwd_splitkv_kernel<Kernel_traits, Is_causal, false, IsEvenKConst>; if (smem_size >= 48 * 1024) { C10_CUDA_CHECK(cudaFuncSetAttribute( kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size)); } kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); }); }); }); }); if (params.num_splits > 1) { // We want kBlockM to be as small as possible for more parallelism. // With 128 threads we can load 512 elements at a time, so if headdim is divisible by 128, kBlockM = 4. // If headdim is divisible by 64, then we set kBlockM = 8, etc. constexpr static int kBlockM = Kernel_traits::kHeadDim % 128 == 0 ? 4 : (Kernel_traits::kHeadDim % 64 == 0 ? 8 : 16); dim3 grid_combine((params.b * params.h * params.seqlen_q + kBlockM - 1) / kBlockM); EVENK_SWITCH(is_even_K, IsEvenKConst, [&] { if (params.num_splits <= 2) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 1, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } else if (params.num_splits <= 4) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 2, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } else if (params.num_splits <= 8) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 3, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } else if (params.num_splits <= 16) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 4, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } else if (params.num_splits <= 32) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 5, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } else if (params.num_splits <= 64) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 6, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } else if (params.num_splits <= 128) { flash_fwd_splitkv_combine_kernel<Kernel_traits, kBlockM, 7, IsEvenKConst><<<grid_combine, Kernel_traits::kNThreads, 0, stream>>>(params); } C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } template<typename T, int Headdim, bool Is_causal> void run_mha_fwd_splitkv_dispatch(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int kBlockM = 64; // Fixed for all head dimensions // TD [2023-08-28]: nvcc segfaults for headdim 96 with block size 64 x 256, // and for headdim 192 with block size 64 x 128. // Also for headdim 160 with block size 64 x 128 after the rotary addition. constexpr static int kBlockN = Headdim <= 64 ? 256 : (Headdim <= 128 ? 
128 : 64); run_flash_splitkv_fwd<Flash_fwd_kernel_traits<Headdim, kBlockM, kBlockN, 4, false, false, T>, Is_causal>(params, stream); } template<typename T, bool Is_causal> void run_mha_fwd_hdim32(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 32; DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); }); } template<typename T, bool Is_causal> void run_mha_fwd_hdim64(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 64; DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { if constexpr(!Is_dropout) { // Using 8 warps is 18% slower for seqlen=2k, 2 warps is 5% slower // Using block size (64 x 256) is 27% slower for seqlen=2k // Using block size (256 x 64) is 85% slower for seqlen=2k, because of register spilling run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } }); } inline bool cuda_is_sm8x() { // dprops = at::cuda::getCurrentDeviceProperties(); // return dprops->major == 8 && dprops->minor > 0; return false; } template<typename T, bool Is_causal> void run_mha_fwd_hdim96(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 96; bool is_sm8x = cuda_is_sm8x(); DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // These two are always slower // run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 128, 4, true, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<96, 64, 128, 4, true, T>>(params, stream); }); } template<typename T, bool Is_causal> void run_mha_fwd_hdim128(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 128; bool is_sm8x = cuda_is_sm8x(); DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { if constexpr(!Is_dropout) { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 32 (48 KB smem) is the fastest for non-causal since we get 2 CTAs per SM. 
if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // 1st ones are good for H100, A100 // 2nd one is good for A6000 bc we get slightly better occupancy } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); } }); } template<typename T, bool Is_causal> void run_mha_fwd_hdim160(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 160; bool is_sm8x = cuda_is_sm8x(); DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { // For A100, H100, 128 x 32 is the fastest. // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 64 with 8 warps is the fastest for non-causal. 
if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream); }); } template<typename T, bool Is_causal> void run_mha_fwd_hdim192(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 192; DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { if constexpr(!Is_dropout) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream); }); } template<typename T, bool Is_causal> void run_mha_fwd_hdim224(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 224; int device; cudaGetDevice(&device); int max_smem_per_block; cudaError status_ = cudaDeviceGetAttribute( &max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); if (status_ != cudaSuccess) { C10_CUDA_CHECK(status_); } // printf("max_smem_per_block = %d\n", max_smem_per_block); DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64)) { // 112 KB run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // We can't do 128 x 32 with 8 warps because with headdim 224, kBlockKSmem = 32. // If we have N = 32, there are only 1024 elements to load at once, where each load // is 8 elements. This means we can only use 128 threads and not 256 threads. 
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); }); } template<typename T, bool Is_causal> void run_mha_fwd_hdim256(Flash_fwd_params &params, cudaStream_t stream) { constexpr static int Headdim = 256; int device; cudaGetDevice(&device); int max_smem_per_sm, max_smem_per_block; cudaError status_ = cudaDeviceGetAttribute( &max_smem_per_sm, cudaDevAttrMaxSharedMemoryPerMultiprocessor, device); status_ = cudaDeviceGetAttribute( &max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); if (status_ != cudaSuccess) { C10_CUDA_CHECK(status_); } // printf("max_smem_per_sm = %d, max_smem_per_block = %d\n", max_smem_per_sm, max_smem_per_block); DROPOUT_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { // For A100, we want to run with 128 x 64 (128KB smem). // For H100 we want to run with 64 x 64 (96KB smem) since then we can get 2 CTAs per SM. if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64) && max_smem_per_sm < 4 * Headdim * (64 + 2 * 64)) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // 64 KB // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // 96 KB // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }
candle/candle-flash-attn/kernels/flash_fwd_launch_template.h/0
{ "file_path": "candle/candle-flash-attn/kernels/flash_fwd_launch_template.h", "repo_id": "candle", "token_count": 10558 }
34
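The hdim224 and hdim256 dispatchers above choose their tile shape from the device's opt-in shared-memory limit rather than hard-coding it. A minimal Rust sketch of that budget arithmetic, assuming Q, K and V tiles are held in fp16/bf16 (2 bytes per element); the function names are illustrative and not part of the kernels:

```rust
/// Bytes of shared memory for one forward tile: a `block_m x head_dim` Q tile
/// plus double-buffered `block_n x head_dim` K and V tiles, 2 bytes per element.
fn smem_bytes(head_dim: usize, block_m: usize, block_n: usize) -> usize {
    2 * head_dim * (block_m + 2 * block_n)
}

/// Tile selection in the style of `run_mha_fwd_hdim224`: prefer 128 x 64 when
/// the per-block opt-in limit (cudaDevAttrMaxSharedMemoryPerBlockOptin) allows it.
fn pick_tile(head_dim: usize, max_smem_per_block: usize) -> (usize, usize) {
    if max_smem_per_block >= smem_bytes(head_dim, 128, 64) {
        (128, 64) // 112 KB at head_dim = 224, matching the comment in the kernel
    } else {
        (64, 64) // smaller fallback tile
    }
}
```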
fn main() {
    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-changed=src/compatibility.cuh");
    println!("cargo:rerun-if-changed=src/cuda_utils.cuh");
    println!("cargo:rerun-if-changed=src/binary_op_macros.cuh");

    let builder = bindgen_cuda::Builder::default();
    println!("cargo:info={builder:?}");
    let bindings = builder.build_ptx().unwrap();
    bindings.write("src/lib.rs").unwrap();
}
candle/candle-kernels/build.rs/0
{ "file_path": "candle/candle-kernels/build.rs", "repo_id": "candle", "token_count": 177 }
35
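`build_ptx()` compiles each `.cu` file under `src/` to PTX, and the `bindings.write` call emits a `src/lib.rs` exposing every compiled kernel as a string constant. A rough sketch of loading one of those constants at runtime with `cudarc`; the `AFFINE` constant and `affine_f32` kernel name are assumptions based on the crate's `affine.cu`, so adjust to the actual generated names:

```rust
use cudarc::driver::CudaDevice;

fn load_affine() -> Result<(), Box<dyn std::error::Error>> {
    let dev = CudaDevice::new(0)?;
    // `candle_kernels::AFFINE` stands in for a PTX string this build script
    // generates into src/lib.rs; "affine_f32" is one of the kernels in affine.cu.
    dev.load_ptx(candle_kernels::AFFINE.into(), "affine", &["affine_f32"])?;
    let _f = dev.get_func("affine", "affine_f32").expect("kernel was just loaded");
    Ok(())
}
```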
[package]
name = "candle-metal-kernels"
version = "0.6.1"
edition = "2021"
description = "Metal kernels for Candle"
repository = "https://github.com/huggingface/candle"
keywords = ["blas", "tensor", "machine-learning"]
categories = ["science"]
license = "MIT OR Apache-2.0"

[dependencies]
metal = { version = "0.27.0", features = ["mps"] }
once_cell = "1.18.0"
thiserror = "1"
tracing = "0.1.37"

[dev-dependencies]
half = { version = "2.3.1", features = ["num-traits", "use-intrinsics", "rand_distr"] }
rand = "0.8.5"
candle/candle-metal-kernels/Cargo.toml/0
{ "file_path": "candle/candle-metal-kernels/Cargo.toml", "repo_id": "candle", "token_count": 218 }
36
use metal::{Buffer, ComputeCommandEncoderRef, ComputePipelineState, MTLSize}; use std::ffi::c_void; /// Most kernels apply similarly across the tensors /// This creates a strategy that uses the maximum amount of threads per threadgroup (capped at the /// actual total buffer length). /// Then kernels can just do their op on their single point in the buffer. pub(crate) fn linear_split(pipeline: &ComputePipelineState, length: usize) -> (MTLSize, MTLSize) { let size = length as u64; let width = std::cmp::min(pipeline.max_total_threads_per_threadgroup(), size); let count = (size + width - 1) / width; let thread_group_count = MTLSize { width: count, height: 1, depth: 1, }; let thread_group_size = MTLSize { width, height: 1, depth: 1, }; (thread_group_count, thread_group_size) } // https://github.com/ml-explore/mlx/blob/bddf23f175726a57f0e443cd45518c0757daa166/mlx/backend/metal/utils.h#L96 pub(crate) fn get_block_dims(dim0: u64, dim1: u64, dim2: u64) -> MTLSize { let mut pows0 = 0u64; let mut pows1 = 0u64; let mut pows2 = 0u64; let mut sum = 0u64; loop { let presum = sum; // Check all the pows if dim0 >= (1 << (pows0 + 1)) { pows0 += 1; sum += 1; } if sum == 10 { break; } if dim1 >= (1 << (pows1 + 1)) { pows1 += 1; sum += 1; } if sum == 10 { break; } if dim2 >= (1 << (pows2 + 1)) { pows2 += 1; sum += 1; } if sum == presum || sum == 10 { break; } } MTLSize { width: 1 << pows0, height: 1 << pows1, depth: 1 << pows2, } } pub(crate) fn set_param<P: EncoderParam>( encoder: &ComputeCommandEncoderRef, position: u64, data: P, ) { <P as EncoderParam>::set_param(encoder, position, data) } /// Helper functions to create the various objects on the compute command encoder /// on a single line. /// Prevents getting wrong some arguments number and mixing length and size in bytes. pub(crate) trait EncoderParam { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self); } macro_rules! 
primitive { ($type:ty) => { impl EncoderParam for $type { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_bytes( position, core::mem::size_of::<$type>() as u64, &data as *const $type as *const c_void, ); } } }; } primitive!(bool); primitive!(usize); primitive!(i32); primitive!(i64); primitive!(u32); primitive!(u64); primitive!(f32); pub struct BufferOffset<'a> { pub buffer: &'a Buffer, pub offset_in_bytes: usize, } impl<'a> BufferOffset<'a> { pub fn zero_offset(buffer: &'a Buffer) -> Self { Self { buffer, offset_in_bytes: 0, } } } impl<T> EncoderParam for &[T] { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_bytes( position, core::mem::size_of_val(data) as u64, data.as_ptr() as *const c_void, ); } } impl EncoderParam for &Buffer { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data), 0); } } impl EncoderParam for (&Buffer, usize) { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.0), data.1 as u64); } } impl<'a> EncoderParam for &BufferOffset<'a> { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.buffer), data.offset_in_bytes as u64); } } impl EncoderParam for &mut Buffer { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data), 0); } } impl EncoderParam for (&mut Buffer, usize) { fn set_param(encoder: &ComputeCommandEncoderRef, position: u64, data: Self) { encoder.set_buffer(position, Some(data.0), data.1 as u64); } } #[macro_export] macro_rules! set_params { ($encoder:ident, ($($param:expr),+)) => ( let mut _index = 0; $( $crate::utils::set_param($encoder, _index, $param); _index += 1; )* ); } pub trait EncoderProvider { type Encoder<'a>: AsRef<metal::ComputeCommandEncoderRef> where Self: 'a; fn encoder<'a>(&'a self) -> Self::Encoder<'a>; } pub struct WrappedEncoder<'a>(&'a ComputeCommandEncoderRef); impl<'a> Drop for WrappedEncoder<'a> { fn drop(&mut self) { self.0.end_encoding() } } impl<'a> AsRef<metal::ComputeCommandEncoderRef> for WrappedEncoder<'a> { fn as_ref(&self) -> &metal::ComputeCommandEncoderRef { &self.0 } } impl EncoderProvider for &metal::CommandBuffer { type Encoder<'a> = WrappedEncoder<'a> where Self: 'a; fn encoder<'a>(&'a self) -> Self::Encoder<'a> { WrappedEncoder(self.new_compute_command_encoder()) } } impl EncoderProvider for &metal::CommandBufferRef { type Encoder<'a> = WrappedEncoder<'a> where Self: 'a; fn encoder<'a>(&'a self) -> Self::Encoder<'a> { WrappedEncoder(self.new_compute_command_encoder()) } }
candle/candle-metal-kernels/src/utils.rs/0
{ "file_path": "candle/candle-metal-kernels/src/utils.rs", "repo_id": "candle", "token_count": 2570 }
37
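`linear_split` and `set_params!` are designed to be used together when encoding a kernel launch: the macro binds argument slots in listing order so indexes cannot drift out of sync with the kernel signature, and the split caps the threadgroup width at the pipeline's maximum. A condensed sketch of a launch in the style of this crate's kernels; the function itself is illustrative and would live inside the crate, since both helpers are `pub(crate)`:

```rust
use metal::{Buffer, ComputeCommandEncoderRef, ComputePipelineState};

fn call_unary(
    pipeline: &ComputePipelineState,
    encoder: &ComputeCommandEncoderRef,
    length: usize,
    input: &Buffer,
    output: &Buffer,
) {
    encoder.set_compute_pipeline_state(pipeline);
    // Slots 0, 1, 2 are bound in the order the parameters are listed.
    set_params!(encoder, (length, input, output));
    // One thread per element, threadgroup width capped by the pipeline.
    let (thread_group_count, thread_group_size) = linear_split(pipeline, length);
    encoder.dispatch_thread_groups(thread_group_count, thread_group_size);
}
```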
//! Embedding Layer.
use candle::{Result, Tensor};

#[derive(Clone, Debug)]
pub struct Embedding {
    embeddings: Tensor,
    hidden_size: usize,
}

impl Embedding {
    pub fn new(embeddings: Tensor, hidden_size: usize) -> Self {
        Self {
            embeddings,
            hidden_size,
        }
    }

    pub fn embeddings(&self) -> &Tensor {
        &self.embeddings
    }

    /// Get the hidden size of the embedding matrix
    pub fn hidden_size(&self) -> usize {
        self.hidden_size
    }
}

impl crate::Module for Embedding {
    fn forward(&self, indexes: &Tensor) -> Result<Tensor> {
        let mut final_dims = indexes.dims().to_vec();
        final_dims.push(self.hidden_size);
        let indexes = indexes.flatten_all()?;
        let values = self.embeddings.index_select(&indexes, 0)?;
        let values = values.reshape(final_dims)?;
        Ok(values)
    }
}

pub fn embedding(in_size: usize, out_size: usize, vb: crate::VarBuilder) -> Result<Embedding> {
    let embeddings = vb.get_with_hints(
        (in_size, out_size),
        "weight",
        crate::Init::Randn {
            mean: 0.,
            stdev: 1.,
        },
    )?;
    Ok(Embedding::new(embeddings, out_size))
}
candle/candle-nn/src/embedding.rs/0
{ "file_path": "candle/candle-nn/src/embedding.rs", "repo_id": "candle", "token_count": 571 }
38
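Note that `forward` accepts indexes of any rank: it flattens them, gathers rows with `index_select`, and reshapes back with `hidden_size` appended as the trailing dimension. A short usage sketch:

```rust
use candle::{Device, Result, Tensor};
use candle_nn::{Embedding, Module};

fn demo() -> Result<()> {
    let dev = Device::Cpu;
    // A 10-entry vocabulary with 4-dimensional embeddings.
    let weights = Tensor::randn(0f32, 1.0, (10, 4), &dev)?;
    let emb = Embedding::new(weights, 4);
    let idx = Tensor::new(&[[0u32, 3], [7, 2]], &dev)?; // shape (2, 2)
    let out = emb.forward(&idx)?;
    assert_eq!(out.dims(), &[2, 2, 4]); // hidden_size appended as the last dim
    Ok(())
}
```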
use candle::{DType, Device, Result, Shape, Tensor, Var}; use std::collections::HashMap; use std::sync::{Arc, Mutex}; /// A `VarMap` is a store that holds named variables. Variables can be retrieved from the stores /// and new variables can be added by providing some initialization config in case they are /// missing. /// `VarMap` structures can be serialized in the safetensors format. #[derive(Clone)] pub struct VarMap { data: Arc<Mutex<HashMap<String, Var>>>, } impl VarMap { /// Create a new empty `VarMap`. #[allow(clippy::new_without_default)] pub fn new() -> Self { let data = Arc::new(Mutex::new(HashMap::new())); Self { data } } /// Retrieve all the variables currently stored in the map. pub fn all_vars(&self) -> Vec<Var> { let tensor_data = self.data.lock().unwrap(); #[allow(clippy::map_clone)] tensor_data.values().map(|c| c.clone()).collect::<Vec<_>>() } /// Save the map in the safetensors format. pub fn save<P: AsRef<std::path::Path>>(&self, path: P) -> Result<()> { let tensor_data = self.data.lock().unwrap(); let data = tensor_data.iter().map(|(k, v)| (k, v.as_tensor())); safetensors::tensor::serialize_to_file(data, &None, path.as_ref())?; Ok(()) } /// Load some values from a safetensors file and modify the existing variables to have these /// values. /// /// Note that values for variables that are currently not in the map are not kept. pub fn load<P: AsRef<std::path::Path>>(&mut self, path: P) -> Result<()> { let path = path.as_ref(); let data = unsafe { candle::safetensors::MmapedSafetensors::new(path)? }; let mut tensor_data = self.data.lock().unwrap(); for (name, var) in tensor_data.iter_mut() { let data = data.load(name, var.device())?; if let Err(err) = var.set(&data) { candle::bail!("error setting {name} using data from {path:?}: {err}",) } } Ok(()) } /// Set a named variable to some value. pub fn set_one<K: AsRef<str>, V: AsRef<Tensor>>(&mut self, name: K, value: V) -> Result<()> { let tensor_data = self.data.lock().unwrap(); let name = name.as_ref(); match tensor_data.get(name) { None => candle::bail!("cannot find {name} in VarMap"), Some(var) => { if let Err(err) = var.set(value.as_ref()) { candle::bail!("error setting {name}: {err}",) } } } Ok(()) } /// Set some named variables to some values. /// /// If an error is returned, some of the variables might have already been set to their new /// values. pub fn set<I: Iterator<Item = (K, V)>, K: AsRef<str>, V: AsRef<Tensor>>( &mut self, iter: I, ) -> Result<()> { let tensor_data = self.data.lock().unwrap(); for (name, value) in iter { let name = name.as_ref(); match tensor_data.get(name) { None => candle::bail!("cannot find {name} in VarMap"), Some(var) => { if let Err(err) = var.set(value.as_ref()) { candle::bail!("error setting {name}: {err}",) } } } } Ok(()) } /// Retrieve or add a new variable. pub fn get<S: Into<Shape>>( &self, shape: S, path: &str, init: crate::Init, dtype: DType, device: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut tensor_data = self.data.lock().unwrap(); if let Some(tensor) = tensor_data.get(path) { let tensor_shape = tensor.shape(); if &shape != tensor_shape { candle::bail!("shape mismatch on {path}: {shape:?} <> {tensor_shape:?}") } return Ok(tensor.as_tensor().clone()); } let var = init.var(shape, dtype, device)?; let tensor = var.as_tensor().clone(); tensor_data.insert(path.to_string(), var); Ok(tensor) } pub fn data(&self) -> &Mutex<HashMap<String, Var>> { &self.data } }
candle/candle-nn/src/var_map.rs/0
{ "file_path": "candle/candle-nn/src/var_map.rs", "repo_id": "candle", "token_count": 1973 }
39
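In training code a `VarMap` is typically paired with a `VarBuilder`: variables created through the builder land in the map, which can then hand them to an optimizer or serialize them. A brief sketch; the checkpoint file name is illustrative:

```rust
use candle::{DType, Device, Result};
use candle_nn::{linear, VarBuilder, VarMap};

fn demo() -> Result<()> {
    let dev = Device::Cpu;
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &dev);
    // The weight and bias created here are registered in `varmap`.
    let _layer = linear(4, 2, vb.pp("layer"))?;
    let _params = varmap.all_vars(); // e.g. hand these to an optimizer
    varmap.save("checkpoint.safetensors")?;
    Ok(())
}
```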
import math
from typing import Any

import candle
from candle import Tensor
from .module import Module

# See https://github.com/pytorch/pytorch/blob/main/torch/nn/modules/linear.py


class Identity(Module):
    r"""A placeholder identity operator that is argument-insensitive.

    Args:
        args: any argument (unused)
        kwargs: any keyword argument (unused)

    Shape:
        - Input: :math:`(*)`, where :math:`*` means any number of dimensions.
        - Output: :math:`(*)`, same shape as the input.

    Examples::

        >>> m = nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
        >>> input = candle.randn(128, 20)
        >>> output = m(input)
        >>> print(output.shape)
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__()

    def forward(self, input: Tensor) -> Tensor:
        return input


class Linear(Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to ``False``, the layer will not learn an additive bias.
            Default: ``True``

    Shape:
        - Input: :math:`(*, H_{in})` where :math:`*` means any number of
          dimensions including none and :math:`H_{in} = \text{in\_features}`.
        - Output: :math:`(*, H_{out})` where all but the last dimension
          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.

    Attributes:
        weight: the learnable weights of the module of shape
            :math:`(\text{out\_features}, \text{in\_features})`. The values are
            initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where
            :math:`k = \frac{1}{\text{in\_features}}`
        bias: the learnable bias of the module of shape :math:`(\text{out\_features})`.
            If :attr:`bias` is ``True``, the values are initialized from
            :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
            :math:`k = \frac{1}{\text{in\_features}}`
    """

    __constants__ = ["in_features", "out_features"]
    in_features: int
    out_features: int
    weight: Tensor

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        # Allow 'weight' to be quantized
        self._quantizable_buffers.add("weight")
        self.in_features = in_features
        self.out_features = out_features
        # TODO: Do actual initialization here: e.g. kaiming_uniform or xavier_uniform
        self.weight = candle.ones((out_features, in_features), **factory_kwargs)
        if bias:
            self.bias = candle.zeros((out_features,), **factory_kwargs)
        else:
            self.bias = None

    def forward(self, x: Tensor) -> Tensor:
        dims = x.shape
        last_dim = dims[-1]
        if isinstance(self.weight, candle.QTensor):
            if len(dims) < 3:
                matmul_result = self.weight.matmul_t(x)
            elif len(dims) == 3:
                b, n, m = dims
                output_shape = (b, n, self.out_features)
                re = x.reshape((b * n, m))
                matmul_result = self.weight.matmul_t(re).reshape(output_shape)
            else:
                raise NotImplementedError("'QTensor.matmul_t' is not implemented for more than 3 dimensions")
            # Add the bias exactly once, and compare against None explicitly:
            # tensor truthiness is ambiguous, and the quantized path must still
            # return a value when no bias is present.
            if self.bias is not None:
                matmul_result = matmul_result.broadcast_add(self.bias)
            return matmul_result
        else:
            if self.weight.shape[-1] == last_dim and len(dims) < 3:
                w = self.weight.t()
            else:
                batch_size = dims[0]
                w = self.weight.broadcast_left((batch_size,)).t()
            x = x.matmul(w)
            if self.bias is not None:
                x = x.broadcast_add(self.bias)
            return x

    def extra_repr(self) -> str:
        return f"in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}"
candle/candle-pyo3/py_src/candle/nn/linear.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/linear.py", "repo_id": "candle", "token_count": 1947 }
40
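The `QTensor` branch above is the Python-side mirror of candle's Rust `QMatMul`, which likewise multiplies by the transposed quantized weight without first dequantizing it to a full matrix. A hedged Rust sketch; the GGUF file and tensor name are placeholders:

```rust
use candle::quantized::{gguf_file, QMatMul};
use candle::{Device, Module, Result, Tensor};

fn quantized_linear(x: &Tensor) -> Result<Tensor> {
    let dev = Device::Cpu;
    let mut file = std::fs::File::open("weights.gguf")?;
    let content = gguf_file::Content::read(&mut file)?;
    // Load one quantized weight matrix and wrap it for matmul_t-style use.
    let w = content.tensor(&mut file, "layer.weight", &dev)?;
    let qmatmul = QMatMul::from_qtensor(w)?;
    qmatmul.forward(x) // x @ w^T, like `weight.matmul_t(x)` in the Python module
}
```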
# See: https://raw.githubusercontent.com/huggingface/tokenizers/main/bindings/python/stub.py import argparse import inspect import os from typing import Optional import black from pathlib import Path import re INDENT = " " * 4 GENERATED_COMMENT = "# Generated content DO NOT EDIT\n" TYPING = """from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Sequence from os import PathLike """ CANDLE_SPECIFIC_TYPING = "from candle.typing import _ArrayLike, Device, Scalar, Index, Shape\n" CANDLE_TENSOR_IMPORTS = "from candle import Tensor,DType,QTensor\n" RETURN_TYPE_MARKER = "&RETURNS&: " ADDITIONAL_TYPEHINTS = {} FORWARD_REF_PATTERN = re.compile(r"ForwardRef\('([^']+)'\)") def do_indent(text: Optional[str], indent: str): if text is None: return "" return text.replace("\n", f"\n{indent}") def function(obj, indent: str, text_signature: str = None): if text_signature is None: text_signature = obj.__text_signature__ text_signature = text_signature.replace("$self", "self").lstrip().rstrip() doc_string = obj.__doc__ if doc_string is None: doc_string = "" # Check if we have a return type annotation in the docstring return_type = None doc_lines = doc_string.split("\n") if doc_lines[-1].lstrip().startswith(RETURN_TYPE_MARKER): # Extract the return type and remove it from the docstring return_type = doc_lines[-1].lstrip()[len(RETURN_TYPE_MARKER) :].strip() doc_string = "\n".join(doc_lines[:-1]) string = "" if return_type: string += f"{indent}def {obj.__name__}{text_signature} -> {return_type}:\n" else: string += f"{indent}def {obj.__name__}{text_signature}:\n" indent += INDENT string += f'{indent}"""\n' string += f"{indent}{do_indent(doc_string, indent)}\n" string += f'{indent}"""\n' string += f"{indent}pass\n" string += "\n" string += "\n" return string def member_sort(member): if inspect.isclass(member): value = 10 + len(inspect.getmro(member)) else: value = 1 return value def fn_predicate(obj): value = inspect.ismethoddescriptor(obj) or inspect.isbuiltin(obj) if value: return obj.__text_signature__ and not obj.__name__.startswith("_") if inspect.isgetsetdescriptor(obj): return not obj.__name__.startswith("_") return False def get_module_members(module): members = [ member for name, member in inspect.getmembers(module) if not name.startswith("_") and not inspect.ismodule(member) ] members.sort(key=member_sort) return members def pyi_file(obj, indent=""): string = "" if inspect.ismodule(obj): string += GENERATED_COMMENT string += TYPING string += CANDLE_SPECIFIC_TYPING if obj.__name__ != "candle.candle": string += CANDLE_TENSOR_IMPORTS members = get_module_members(obj) for member in members: string += pyi_file(member, indent) elif inspect.isclass(obj): indent += INDENT mro = inspect.getmro(obj) if len(mro) > 2: inherit = f"({mro[1].__name__})" else: inherit = "" string += f"class {obj.__name__}{inherit}:\n" body = "" if obj.__doc__: body += f'{indent}"""\n{indent}{do_indent(obj.__doc__, indent)}\n{indent}"""\n' fns = inspect.getmembers(obj, fn_predicate) # Init if obj.__text_signature__: body += f"{indent}def __init__{obj.__text_signature__}:\n" body += f"{indent+INDENT}pass\n" body += "\n" if obj.__name__ in ADDITIONAL_TYPEHINTS: additional_members = inspect.getmembers(ADDITIONAL_TYPEHINTS[obj.__name__]) additional_functions = [] for name, member in additional_members: if inspect.isfunction(member): additional_functions.append((name, member)) def process_additional_function(fn): signature = inspect.signature(fn) cleaned_signature = re.sub(FORWARD_REF_PATTERN, r"\1", str(signature)) string = 
f"{indent}def {fn.__name__}{cleaned_signature}:\n" string += ( f'{indent+INDENT}"""{indent+INDENT}{do_indent(fn.__doc__, indent+INDENT)}{indent+INDENT}"""\n' ) string += f"{indent+INDENT}pass\n" string += "\n" return string for name, fn in additional_functions: body += process_additional_function(fn) for name, fn in fns: body += pyi_file(fn, indent=indent) if not body: body += f"{indent}pass\n" string += body string += "\n\n" elif inspect.isbuiltin(obj): string += f"{indent}@staticmethod\n" string += function(obj, indent) elif inspect.ismethoddescriptor(obj): string += function(obj, indent) elif inspect.isgetsetdescriptor(obj): # TODO it would be interesting to add the setter maybe ? string += f"{indent}@property\n" string += function(obj, indent, text_signature="(self)") elif obj.__class__.__name__ == "DType": string += f"class {str(obj).lower()}(DType):\n" string += f"{indent+INDENT}pass\n" else: raise Exception(f"Object {obj} is not supported") return string def py_file(module, origin): members = get_module_members(module) string = GENERATED_COMMENT string += f"from .. import {origin}\n" string += "\n" for member in members: if hasattr(member, "__name__"): name = member.__name__ else: name = str(member) string += f"{name} = {origin}.{name}\n" return string def do_black(content, is_pyi): mode = black.Mode( target_versions={black.TargetVersion.PY35}, line_length=119, is_pyi=is_pyi, string_normalization=True, ) try: return black.format_file_contents(content, fast=True, mode=mode) except black.NothingChanged: return content def write(module, directory, origin, check=False): submodules = [(name, member) for name, member in inspect.getmembers(module) if inspect.ismodule(member)] filename = os.path.join(directory, "__init__.pyi") pyi_content = pyi_file(module) pyi_content = do_black(pyi_content, is_pyi=True) os.makedirs(directory, exist_ok=True) if check: with open(filename, "r") as f: data = f.read() print("generated content") print(pyi_content) assert data == pyi_content, f"The content of {filename} seems outdated, please run `python stub.py`" else: with open(filename, "w") as f: f.write(pyi_content) filename = os.path.join(directory, "__init__.py") py_content = py_file(module, origin) py_content = do_black(py_content, is_pyi=False) os.makedirs(directory, exist_ok=True) is_auto = False if not os.path.exists(filename): is_auto = True else: with open(filename, "r") as f: line = f.readline() if line == GENERATED_COMMENT: is_auto = True if is_auto: if check: with open(filename, "r") as f: data = f.read() print("generated content") print(py_content) assert data == py_content, f"The content of {filename} seems outdated, please run `python stub.py`" else: with open(filename, "w") as f: f.write(py_content) for name, submodule in submodules: write(submodule, os.path.join(directory, name), f"{name}", check=check) def extract_additional_types(module): additional_types = {} for name, member in inspect.getmembers(module): if inspect.isclass(member): if hasattr(member, "__name__"): name = member.__name__ else: name = str(member) if name not in additional_types: additional_types[name] = member return additional_types if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--check", action="store_true") args = parser.parse_args() # Enable execution from the candle and candle-pyo3 directories cwd = Path.cwd() directory = "py_src/candle/" if cwd.name != "candle-pyo3": directory = f"candle-pyo3/{directory}" import candle import _additional_typing ADDITIONAL_TYPEHINTS = 
extract_additional_types(_additional_typing) write(candle.candle, directory, "candle", check=args.check)
candle/candle-pyo3/stub.py/0
{ "file_path": "candle/candle-pyo3/stub.py", "repo_id": "candle", "token_count": 3931 }
41
use super::with_tracing::{layer_norm, linear, LayerNorm, Linear}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{embedding, Embedding, Module, VarBuilder}; use serde::Deserialize; pub const DTYPE: DType = DType::F32; #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize)] #[serde(rename_all = "lowercase")] pub enum HiddenAct { Gelu, GeluApproximate, Relu, } struct HiddenActLayer { act: HiddenAct, span: tracing::Span, } impl HiddenActLayer { fn new(act: HiddenAct) -> Self { let span = tracing::span!(tracing::Level::TRACE, "hidden-act"); Self { act, span } } fn forward(&self, xs: &Tensor) -> candle::Result<Tensor> { let _enter = self.span.enter(); match self.act { // https://github.com/huggingface/transformers/blob/cd4584e3c809bb9e1392ccd3fe38b40daba5519a/src/transformers/activations.py#L213 HiddenAct::Gelu => xs.gelu_erf(), HiddenAct::GeluApproximate => xs.gelu(), HiddenAct::Relu => xs.relu(), } } } #[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Default)] #[serde(rename_all = "lowercase")] enum PositionEmbeddingType { #[default] Absolute, } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/configuration_bert.py#L1 #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { vocab_size: usize, hidden_size: usize, num_hidden_layers: usize, num_attention_heads: usize, intermediate_size: usize, pub hidden_act: HiddenAct, hidden_dropout_prob: f64, max_position_embeddings: usize, type_vocab_size: usize, initializer_range: f64, layer_norm_eps: f64, pad_token_id: usize, #[serde(default)] position_embedding_type: PositionEmbeddingType, #[serde(default)] use_cache: bool, classifier_dropout: Option<f64>, model_type: Option<String>, } impl Default for Config { fn default() -> Self { Self { vocab_size: 30522, hidden_size: 768, num_hidden_layers: 12, num_attention_heads: 12, intermediate_size: 3072, hidden_act: HiddenAct::Gelu, hidden_dropout_prob: 0.1, max_position_embeddings: 512, type_vocab_size: 2, initializer_range: 0.02, layer_norm_eps: 1e-12, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Absolute, use_cache: true, classifier_dropout: None, model_type: Some("bert".to_string()), } } } impl Config { fn _all_mini_lm_l6_v2() -> Self { // https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/blob/main/config.json Self { vocab_size: 30522, hidden_size: 384, num_hidden_layers: 6, num_attention_heads: 12, intermediate_size: 1536, hidden_act: HiddenAct::Gelu, hidden_dropout_prob: 0.1, max_position_embeddings: 512, type_vocab_size: 2, initializer_range: 0.02, layer_norm_eps: 1e-12, pad_token_id: 0, position_embedding_type: PositionEmbeddingType::Absolute, use_cache: true, classifier_dropout: None, model_type: Some("bert".to_string()), } } } struct Dropout { #[allow(dead_code)] pr: f64, } impl Dropout { fn new(pr: f64) -> Self { Self { pr } } } impl Module for Dropout { fn forward(&self, x: &Tensor) -> Result<Tensor> { // TODO Ok(x.clone()) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L180 struct BertEmbeddings { word_embeddings: Embedding, position_embeddings: Option<Embedding>, token_type_embeddings: Embedding, layer_norm: LayerNorm, dropout: Dropout, span: tracing::Span, } impl BertEmbeddings { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let word_embeddings = embedding( config.vocab_size, config.hidden_size, vb.pp("word_embeddings"), )?; let position_embeddings = 
embedding( config.max_position_embeddings, config.hidden_size, vb.pp("position_embeddings"), )?; let token_type_embeddings = embedding( config.type_vocab_size, config.hidden_size, vb.pp("token_type_embeddings"), )?; let layer_norm = layer_norm( config.hidden_size, config.layer_norm_eps, vb.pp("LayerNorm"), )?; Ok(Self { word_embeddings, position_embeddings: Some(position_embeddings), token_type_embeddings, layer_norm, dropout: Dropout::new(config.hidden_dropout_prob), span: tracing::span!(tracing::Level::TRACE, "embeddings"), }) } fn forward(&self, input_ids: &Tensor, token_type_ids: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_bsize, seq_len) = input_ids.dims2()?; let input_embeddings = self.word_embeddings.forward(input_ids)?; let token_type_embeddings = self.token_type_embeddings.forward(token_type_ids)?; let mut embeddings = (&input_embeddings + token_type_embeddings)?; if let Some(position_embeddings) = &self.position_embeddings { // TODO: Proper absolute positions? let position_ids = (0..seq_len as u32).collect::<Vec<_>>(); let position_ids = Tensor::new(&position_ids[..], input_ids.device())?; embeddings = embeddings.broadcast_add(&position_embeddings.forward(&position_ids)?)? } let embeddings = self.layer_norm.forward(&embeddings)?; let embeddings = self.dropout.forward(&embeddings)?; Ok(embeddings) } } struct BertSelfAttention { query: Linear, key: Linear, value: Linear, dropout: Dropout, num_attention_heads: usize, attention_head_size: usize, span: tracing::Span, span_softmax: tracing::Span, } impl BertSelfAttention { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention_head_size = config.hidden_size / config.num_attention_heads; let all_head_size = config.num_attention_heads * attention_head_size; let dropout = Dropout::new(config.hidden_dropout_prob); let hidden_size = config.hidden_size; let query = linear(hidden_size, all_head_size, vb.pp("query"))?; let value = linear(hidden_size, all_head_size, vb.pp("value"))?; let key = linear(hidden_size, all_head_size, vb.pp("key"))?; Ok(Self { query, key, value, dropout, num_attention_heads: config.num_attention_heads, attention_head_size, span: tracing::span!(tracing::Level::TRACE, "self-attn"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn transpose_for_scores(&self, xs: &Tensor) -> Result<Tensor> { let mut new_x_shape = xs.dims().to_vec(); new_x_shape.pop(); new_x_shape.push(self.num_attention_heads); new_x_shape.push(self.attention_head_size); let xs = xs.reshape(new_x_shape.as_slice())?.transpose(1, 2)?; xs.contiguous() } fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let query_layer = self.query.forward(hidden_states)?; let key_layer = self.key.forward(hidden_states)?; let value_layer = self.value.forward(hidden_states)?; let query_layer = self.transpose_for_scores(&query_layer)?; let key_layer = self.transpose_for_scores(&key_layer)?; let value_layer = self.transpose_for_scores(&value_layer)?; let attention_scores = query_layer.matmul(&key_layer.t()?)?; let attention_scores = (attention_scores / (self.attention_head_size as f64).sqrt())?; let attention_scores = attention_scores.broadcast_add(attention_mask)?; let attention_probs = { let _enter_sm = self.span_softmax.enter(); candle_nn::ops::softmax(&attention_scores, candle::D::Minus1)? 
}; let attention_probs = self.dropout.forward(&attention_probs)?; let context_layer = attention_probs.matmul(&value_layer)?; let context_layer = context_layer.transpose(1, 2)?.contiguous()?; let context_layer = context_layer.flatten_from(candle::D::Minus2)?; Ok(context_layer) } } struct BertSelfOutput { dense: Linear, layer_norm: LayerNorm, dropout: Dropout, span: tracing::Span, } impl BertSelfOutput { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear(config.hidden_size, config.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm( config.hidden_size, config.layer_norm_eps, vb.pp("LayerNorm"), )?; let dropout = Dropout::new(config.hidden_dropout_prob); Ok(Self { dense, layer_norm, dropout, span: tracing::span!(tracing::Level::TRACE, "self-out"), }) } fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states = self.dense.forward(hidden_states)?; let hidden_states = self.dropout.forward(&hidden_states)?; self.layer_norm.forward(&(hidden_states + input_tensor)?) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L392 struct BertAttention { self_attention: BertSelfAttention, self_output: BertSelfOutput, span: tracing::Span, } impl BertAttention { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let self_attention = BertSelfAttention::load(vb.pp("self"), config)?; let self_output = BertSelfOutput::load(vb.pp("output"), config)?; Ok(Self { self_attention, self_output, span: tracing::span!(tracing::Level::TRACE, "attn"), }) } fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let self_outputs = self.self_attention.forward(hidden_states, attention_mask)?; let attention_output = self.self_output.forward(&self_outputs, hidden_states)?; Ok(attention_output) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L441 struct BertIntermediate { dense: Linear, intermediate_act: HiddenActLayer, span: tracing::Span, } impl BertIntermediate { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear(config.hidden_size, config.intermediate_size, vb.pp("dense"))?; Ok(Self { dense, intermediate_act: HiddenActLayer::new(config.hidden_act), span: tracing::span!(tracing::Level::TRACE, "inter"), }) } } impl Module for BertIntermediate { fn forward(&self, hidden_states: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states = self.dense.forward(hidden_states)?; let ys = self.intermediate_act.forward(&hidden_states)?; Ok(ys) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L456 struct BertOutput { dense: Linear, layer_norm: LayerNorm, dropout: Dropout, span: tracing::Span, } impl BertOutput { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let dense = linear(config.intermediate_size, config.hidden_size, vb.pp("dense"))?; let layer_norm = layer_norm( config.hidden_size, config.layer_norm_eps, vb.pp("LayerNorm"), )?; let dropout = Dropout::new(config.hidden_dropout_prob); Ok(Self { dense, layer_norm, dropout, span: tracing::span!(tracing::Level::TRACE, "out"), }) } fn forward(&self, hidden_states: &Tensor, input_tensor: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states = 
self.dense.forward(hidden_states)?; let hidden_states = self.dropout.forward(&hidden_states)?; self.layer_norm.forward(&(hidden_states + input_tensor)?) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L470 struct BertLayer { attention: BertAttention, intermediate: BertIntermediate, output: BertOutput, span: tracing::Span, } impl BertLayer { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let attention = BertAttention::load(vb.pp("attention"), config)?; let intermediate = BertIntermediate::load(vb.pp("intermediate"), config)?; let output = BertOutput::load(vb.pp("output"), config)?; Ok(Self { attention, intermediate, output, span: tracing::span!(tracing::Level::TRACE, "layer"), }) } fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let attention_output = self.attention.forward(hidden_states, attention_mask)?; // TODO: Support cross-attention? // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 // TODO: Support something similar to `apply_chunking_to_forward`? let intermediate_output = self.intermediate.forward(&attention_output)?; let layer_output = self .output .forward(&intermediate_output, &attention_output)?; Ok(layer_output) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L556 struct BertEncoder { layers: Vec<BertLayer>, span: tracing::Span, } impl BertEncoder { fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let layers = (0..config.num_hidden_layers) .map(|index| BertLayer::load(vb.pp(&format!("layer.{index}")), config)) .collect::<Result<Vec<_>>>()?; let span = tracing::span!(tracing::Level::TRACE, "encoder"); Ok(BertEncoder { layers, span }) } fn forward(&self, hidden_states: &Tensor, attention_mask: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let mut hidden_states = hidden_states.clone(); // Use a loop rather than a fold as it's easier to modify when adding debug/... for layer in self.layers.iter() { hidden_states = layer.forward(&hidden_states, attention_mask)? 
} Ok(hidden_states) } } // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874 pub struct BertModel { embeddings: BertEmbeddings, encoder: BertEncoder, pub device: Device, span: tracing::Span, } impl BertModel { pub fn load(vb: VarBuilder, config: &Config) -> Result<Self> { let (embeddings, encoder) = match ( BertEmbeddings::load(vb.pp("embeddings"), config), BertEncoder::load(vb.pp("encoder"), config), ) { (Ok(embeddings), Ok(encoder)) => (embeddings, encoder), (Err(err), _) | (_, Err(err)) => { if let Some(model_type) = &config.model_type { if let (Ok(embeddings), Ok(encoder)) = ( BertEmbeddings::load(vb.pp(&format!("{model_type}.embeddings")), config), BertEncoder::load(vb.pp(&format!("{model_type}.encoder")), config), ) { (embeddings, encoder) } else { return Err(err); } } else { return Err(err); } } }; Ok(Self { embeddings, encoder, device: vb.device().clone(), span: tracing::span!(tracing::Level::TRACE, "model"), }) } pub fn forward( &self, input_ids: &Tensor, token_type_ids: &Tensor, attention_mask: Option<&Tensor>, ) -> Result<Tensor> { let _enter = self.span.enter(); let embedding_output = self.embeddings.forward(input_ids, token_type_ids)?; let attention_mask = match attention_mask { Some(attention_mask) => attention_mask.clone(), None => input_ids.ones_like()?, }; // https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L995 let attention_mask = get_extended_attention_mask(&attention_mask, DType::F32)?; let sequence_output = self.encoder.forward(&embedding_output, &attention_mask)?; Ok(sequence_output) } } fn get_extended_attention_mask(attention_mask: &Tensor, dtype: DType) -> Result<Tensor> { let attention_mask = match attention_mask.rank() { 3 => attention_mask.unsqueeze(1)?, 2 => attention_mask.unsqueeze(1)?.unsqueeze(1)?, _ => candle::bail!("Wrong shape for input_ids or attention_mask"), }; let attention_mask = attention_mask.to_dtype(dtype)?; // torch.finfo(dtype).min (attention_mask.ones_like()? - &attention_mask)? .broadcast_mul(&Tensor::try_from(f32::MIN)?.to_device(attention_mask.device())?) }
candle/candle-transformers/src/models/bert.rs/0
{ "file_path": "candle/candle-transformers/src/models/bert.rs", "repo_id": "candle", "token_count": 8444 }
42
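End to end, the pieces above compose into embeddings → N encoder layers → sequence output, with the attention mask broadcast to `(b, 1, 1, seq)` and turned into additive `f32::MIN` terms. A loading-and-inference sketch; the file names and the `serde_json` dependency are assumptions, and a real pipeline would also run a tokenizer:

```rust
use candle::{Device, Result, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::models::bert::{BertModel, Config, DTYPE};

fn embed() -> Result<Tensor> {
    let device = Device::Cpu;
    let config: Config = serde_json::from_str(&std::fs::read_to_string("config.json")?)
        .map_err(candle::Error::wrap)?;
    let vb =
        unsafe { VarBuilder::from_mmaped_safetensors(&["model.safetensors"], DTYPE, &device)? };
    let model = BertModel::load(vb, &config)?;
    let input_ids = Tensor::new(&[[101u32, 7592, 102]], &device)?; // [CLS] hello [SEP]
    let token_type_ids = input_ids.zeros_like()?;
    // None => an all-ones attention mask is synthesized internally.
    model.forward(&input_ids, &token_type_ids, None)
}
```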
use candle::{Result, Tensor, D}; use candle_nn as nn; use nn::{Module, VarBuilder}; // Based on the Python version from torchvision. // https://github.com/pytorch/vision/blob/0d75d9e5516f446c9c0ef93bd4ed9fea13992d06/torchvision/models/efficientnet.py#L47 #[derive(Debug, Clone, Copy)] pub struct MBConvConfig { expand_ratio: f64, kernel: usize, stride: usize, input_channels: usize, out_channels: usize, num_layers: usize, } fn make_divisible(v: f64, divisor: usize) -> usize { let min_value = divisor; let new_v = usize::max( min_value, (v + divisor as f64 * 0.5) as usize / divisor * divisor, ); if (new_v as f64) < 0.9 * v { new_v + divisor } else { new_v } } fn bneck_confs(width_mult: f64, depth_mult: f64) -> Vec<MBConvConfig> { let bneck_conf = |e, k, s, i, o, n| { let input_channels = make_divisible(i as f64 * width_mult, 8); let out_channels = make_divisible(o as f64 * width_mult, 8); let num_layers = (n as f64 * depth_mult).ceil() as usize; MBConvConfig { expand_ratio: e, kernel: k, stride: s, input_channels, out_channels, num_layers, } }; vec![ bneck_conf(1., 3, 1, 32, 16, 1), bneck_conf(6., 3, 2, 16, 24, 2), bneck_conf(6., 5, 2, 24, 40, 2), bneck_conf(6., 3, 2, 40, 80, 3), bneck_conf(6., 5, 1, 80, 112, 3), bneck_conf(6., 5, 2, 112, 192, 4), bneck_conf(6., 3, 1, 192, 320, 1), ] } impl MBConvConfig { pub fn b0() -> Vec<Self> { bneck_confs(1.0, 1.0) } pub fn b1() -> Vec<Self> { bneck_confs(1.0, 1.1) } pub fn b2() -> Vec<Self> { bneck_confs(1.1, 1.2) } pub fn b3() -> Vec<Self> { bneck_confs(1.2, 1.4) } pub fn b4() -> Vec<Self> { bneck_confs(1.4, 1.8) } pub fn b5() -> Vec<Self> { bneck_confs(1.6, 2.2) } pub fn b6() -> Vec<Self> { bneck_confs(1.8, 2.6) } pub fn b7() -> Vec<Self> { bneck_confs(2.0, 3.1) } } /// Conv2D with same padding. #[derive(Debug)] struct Conv2DSame { conv2d: nn::Conv2d, s: usize, k: usize, } impl Conv2DSame { fn new( vb: VarBuilder, i: usize, o: usize, k: usize, stride: usize, groups: usize, bias: bool, ) -> Result<Self> { let conv_config = nn::Conv2dConfig { stride, groups, ..Default::default() }; let conv2d = if bias { nn::conv2d(i, o, k, conv_config, vb)? } else { nn::conv2d_no_bias(i, o, k, conv_config, vb)? 
}; Ok(Self { conv2d, s: stride, k, }) } } impl Module for Conv2DSame { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let s = self.s; let k = self.k; let (_, _, ih, iw) = xs.dims4()?; let oh = (ih + s - 1) / s; let ow = (iw + s - 1) / s; let pad_h = usize::max((oh - 1) * s + k - ih, 0); let pad_w = usize::max((ow - 1) * s + k - iw, 0); if pad_h > 0 || pad_w > 0 { let xs = xs.pad_with_zeros(2, pad_h / 2, pad_h - pad_h / 2)?; let xs = xs.pad_with_zeros(3, pad_w / 2, pad_w - pad_w / 2)?; self.conv2d.forward(&xs) } else { self.conv2d.forward(xs) } } } #[derive(Debug)] struct ConvNormActivation { conv2d: Conv2DSame, bn2d: nn::BatchNorm, activation: bool, } impl ConvNormActivation { fn new( vb: VarBuilder, i: usize, o: usize, k: usize, stride: usize, groups: usize, ) -> Result<Self> { let conv2d = Conv2DSame::new(vb.pp("0"), i, o, k, stride, groups, false)?; let bn2d = nn::batch_norm(o, 1e-3, vb.pp("1"))?; Ok(Self { conv2d, bn2d, activation: true, }) } fn no_activation(self) -> Self { Self { activation: false, ..self } } } impl Module for ConvNormActivation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.conv2d.forward(xs)?.apply_t(&self.bn2d, false)?; if self.activation { swish(&xs) } else { Ok(xs) } } } #[derive(Debug)] struct SqueezeExcitation { fc1: Conv2DSame, fc2: Conv2DSame, } impl SqueezeExcitation { fn new(vb: VarBuilder, in_channels: usize, squeeze_channels: usize) -> Result<Self> { let fc1 = Conv2DSame::new(vb.pp("fc1"), in_channels, squeeze_channels, 1, 1, 1, true)?; let fc2 = Conv2DSame::new(vb.pp("fc2"), squeeze_channels, in_channels, 1, 1, 1, true)?; Ok(Self { fc1, fc2 }) } } impl Module for SqueezeExcitation { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; // equivalent to adaptive_avg_pool2d([1, 1]) let xs = xs.mean_keepdim(D::Minus2)?.mean_keepdim(D::Minus1)?; let xs = self.fc1.forward(&xs)?; let xs = swish(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = nn::ops::sigmoid(&xs)?; residual.broadcast_mul(&xs) } } #[derive(Debug)] struct MBConv { expand_cna: Option<ConvNormActivation>, depthwise_cna: ConvNormActivation, squeeze_excitation: SqueezeExcitation, project_cna: ConvNormActivation, config: MBConvConfig, } impl MBConv { fn new(vb: VarBuilder, c: MBConvConfig) -> Result<Self> { let vb = vb.pp("block"); let exp = make_divisible(c.input_channels as f64 * c.expand_ratio, 8); let expand_cna = if exp != c.input_channels { Some(ConvNormActivation::new( vb.pp("0"), c.input_channels, exp, 1, 1, 1, )?) } else { None }; let start_index = if expand_cna.is_some() { 1 } else { 0 }; let depthwise_cna = ConvNormActivation::new(vb.pp(start_index), exp, exp, c.kernel, c.stride, exp)?; let squeeze_channels = usize::max(1, c.input_channels / 4); let squeeze_excitation = SqueezeExcitation::new(vb.pp(start_index + 1), exp, squeeze_channels)?; let project_cna = ConvNormActivation::new(vb.pp(start_index + 2), exp, c.out_channels, 1, 1, 1)? 
.no_activation(); Ok(Self { expand_cna, depthwise_cna, squeeze_excitation, project_cna, config: c, }) } } impl Module for MBConv { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let use_res_connect = self.config.stride == 1 && self.config.input_channels == self.config.out_channels; let ys = match &self.expand_cna { Some(expand_cna) => expand_cna.forward(xs)?, None => xs.clone(), }; let ys = self.depthwise_cna.forward(&ys)?; let ys = self.squeeze_excitation.forward(&ys)?; let ys = self.project_cna.forward(&ys)?; if use_res_connect { ys + xs } else { Ok(ys) } } } fn swish(s: &Tensor) -> Result<Tensor> { s * nn::ops::sigmoid(s)? } #[derive(Debug)] pub struct EfficientNet { init_cna: ConvNormActivation, blocks: Vec<MBConv>, final_cna: ConvNormActivation, classifier: nn::Linear, } impl EfficientNet { pub fn new(p: VarBuilder, configs: Vec<MBConvConfig>, nclasses: usize) -> Result<Self> { let f_p = p.pp("features"); let first_in_c = configs[0].input_channels; let last_out_c = configs.last().unwrap().out_channels; let final_out_c = 4 * last_out_c; let init_cna = ConvNormActivation::new(f_p.pp(0), 3, first_in_c, 3, 2, 1)?; let nconfigs = configs.len(); let mut blocks = vec![]; for (index, cnf) in configs.into_iter().enumerate() { let f_p = f_p.pp(index + 1); for r_index in 0..cnf.num_layers { let cnf = if r_index == 0 { cnf } else { MBConvConfig { input_channels: cnf.out_channels, stride: 1, ..cnf } }; blocks.push(MBConv::new(f_p.pp(r_index), cnf)?) } } let final_cna = ConvNormActivation::new(f_p.pp(nconfigs + 1), last_out_c, final_out_c, 1, 1, 1)?; let classifier = nn::linear(final_out_c, nclasses, p.pp("classifier.1"))?; Ok(Self { init_cna, blocks, final_cna, classifier, }) } } impl Module for EfficientNet { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.init_cna.forward(xs)?; for block in self.blocks.iter() { xs = block.forward(&xs)? } let xs = self.final_cna.forward(&xs)?; // Equivalent to adaptive_avg_pool2d([1, 1]) -> squeeze(-1) -> squeeze(-1) let xs = xs.mean(D::Minus1)?.mean(D::Minus1)?; self.classifier.forward(&xs) } }
candle/candle-transformers/src/models/efficientnet.rs/0
{ "file_path": "candle/candle-transformers/src/models/efficientnet.rs", "repo_id": "candle", "token_count": 5123 }
43
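Putting the blocks together, a classification pass is stem → MBConv stack → 1x1 head conv → global average pool → linear classifier. A usage sketch; the weight layout must match the torchvision-style `features.*` / `classifier.1` naming used by the loaders above:

```rust
use candle::{Result, Tensor};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::efficientnet::{EfficientNet, MBConvConfig};

fn classify(img: &Tensor, vb: VarBuilder) -> Result<Tensor> {
    // b0 is the 1.0/1.0 width/depth baseline; b1..b7 scale those multipliers.
    let model = EfficientNet::new(vb, MBConvConfig::b0(), 1000)?;
    // img: (batch, 3, height, width), ImageNet-normalized.
    model.forward(img) // logits of shape (batch, 1000)
}
```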
use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::linear_no_bias as linear; use candle_nn::{embedding, rms_norm, Embedding, Linear, Module, RmsNorm, VarBuilder}; use std::collections::HashMap; #[derive(Debug, Clone)] pub struct Config { pub dim: usize, // transformer dimension pub hidden_dim: usize, // for ffn layers pub n_layers: usize, // number of layers pub n_heads: usize, // number of query heads pub n_kv_heads: usize, // number of key/value heads (can be < query heads because of multiquery) pub vocab_size: usize, // vocabulary size, usually 256 (byte-level) pub seq_len: usize, // max sequence length pub norm_eps: f64, } impl Config { pub fn tiny_260k() -> Self { Self { dim: 64, hidden_dim: 768, n_layers: 5, n_heads: 8, n_kv_heads: 4, vocab_size: 32000, seq_len: 512, norm_eps: 1e-5, } } pub fn tiny_15m() -> Self { Self { dim: 288, hidden_dim: 768, n_layers: 6, n_heads: 6, n_kv_heads: 6, vocab_size: 32000, seq_len: 256, norm_eps: 1e-5, } } pub fn tiny_42m() -> Self { Self { dim: 512, hidden_dim: 768, n_layers: 8, n_heads: 8, n_kv_heads: 8, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } pub fn tiny_110m() -> Self { Self { dim: 768, hidden_dim: 768, n_layers: 12, n_heads: 12, n_kv_heads: 12, vocab_size: 32000, seq_len: 1024, norm_eps: 1e-5, } } } #[derive(Debug, Clone)] pub struct Cache { masks: HashMap<usize, Tensor>, pub use_kv_cache: bool, pub kvs: Vec<Option<(Tensor, Tensor)>>, pub cos: Tensor, pub sin: Tensor, device: Device, } impl Cache { pub fn new(use_kv_cache: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_elem = cfg.dim / cfg.n_heads; let theta: Vec<_> = (0..n_elem) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / n_elem as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), vb.device())?; let idx_theta = Tensor::arange(0, cfg.seq_len as u32, vb.device())? .to_dtype(DType::F32)? .reshape((cfg.seq_len, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let precomputed_cos = idx_theta.cos()?; let precomputed_sin = idx_theta.sin()?; let freq_cis_real = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_real") .unwrap_or(precomputed_cos); let freq_cis_imag = vb .get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_imag") .unwrap_or(precomputed_sin); let cos = freq_cis_real.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; let sin = freq_cis_imag.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?; Ok(Self { masks: HashMap::new(), use_kv_cache, kvs: vec![None; cfg.n_layers], cos, sin, device: vb.device().clone(), }) } pub fn mask(&mut self, t: usize) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &self.device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } } fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? 
} #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_head: usize, n_key_value_head: usize, head_dim: usize, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let (b_sz, seq_len, h, n_embd) = x.dims4()?; let cos = cache.cos.i(index_pos..index_pos + seq_len)?; let sin = cache.sin.i(index_pos..index_pos + seq_len)?; let cos = cos.unsqueeze(1)?; let sin = sin.unsqueeze(1)?; let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?; Ok(rope) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?; let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?; } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = if seq_len <= 1 { att } else { let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; masked_fill(&att, &mask, f32::NEG_INFINITY)? }; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_head / self.n_key_value_head; if n_rep == 1 { Ok(x) } else { let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?; let x = x .unsqueeze(3)? .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? 
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let size_in = cfg.dim; let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads; let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_head: cfg.n_heads, n_key_value_head: cfg.n_kv_heads, head_dim: cfg.dim / cfg.n_heads, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, } impl Mlp { fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h_size = cfg.dim; let i_size = cfg.hidden_dim; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let input_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } #[derive(Debug, Clone)] pub struct Llama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, pub config: Config, } impl Llama { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, _seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let wte = embedding(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, config: cfg, }) } }
candle/candle-transformers/src/models/llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/llama2_c.rs", "repo_id": "candle", "token_count": 6478 }
44
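The `repeat_kv` helper in `llama2_c.rs` above implements grouped-query attention by expanding each key/value head `n_rep` times through an `unsqueeze`/`expand`/`reshape` chain instead of repeated concatenation. Below is a minimal standalone sketch of that trick on a toy tensor; it assumes the candle core crate is available under the `candle` name, as in the file itself.

```rust
use candle::{Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // 1 batch, 4 positions, 2 KV heads of dim 8, replicated for 6 query heads.
    let (b_sz, seq_len, n_kv_head, head_dim, n_rep) = (1, 4, 2, 8, 3);
    let x = Tensor::randn(0f32, 1f32, (b_sz, seq_len, n_kv_head, head_dim), &dev)?;
    let y = x
        .unsqueeze(3)? // (b, s, kv, 1, d)
        .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? // stride-0 broadcast, no copy yet
        .reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; // materialized as (b, s, kv * n_rep, d)
    assert_eq!(y.dims(), &[1, 4, 6, 8]);
    Ok(())
}
```

Each of the `n_rep` query heads in a group ends up attending against an identical copy of its KV head, which is exactly what a dense duplication would produce, while deferring the memory traffic to the final reshape.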
use super::fastvit; use super::openclip::text_model; use candle::{Result, Tensor, D}; use candle_nn::{Func, VarBuilder}; #[derive(Clone, Debug)] pub struct MobileClipModel { text_model: text_model::OpenClipTextTransformer, vision_model: Func<'static>, text_projection: Tensor, logit_scale: Tensor, } #[derive(Clone, Debug)] pub struct MobileClipConfig { pub text_config: text_model::Config, pub vision_config: fastvit::Config, pub image_size: usize, } impl MobileClipConfig { pub fn s1() -> Self { let text_config = text_model::Config::vit_base_patch32(); let vision_config = fastvit::Config::mci1(); Self { text_config, vision_config, image_size: 256, } } pub fn s2() -> Self { let text_config = text_model::Config::vit_base_patch32(); let vision_config = fastvit::Config::mci2(); Self { text_config, vision_config, image_size: 256, } } } impl MobileClipModel { pub fn new(vs: VarBuilder, c: &MobileClipConfig) -> Result<Self> { let vision_model = fastvit::fastvit(&c.vision_config, 512, vs.pp("visual.trunk"))?; let text_model = text_model::OpenClipTextTransformer::new(vs.pp("text"), &c.text_config)?; let text_projection = vs.get( (c.text_config.embed_dim, c.text_config.projection_dim), "text.text_projection", )?; let logit_scale = vs.get(&[], "logit_scale")?; Ok(Self { text_model, vision_model, text_projection, logit_scale, }) } pub fn get_text_features(&self, input_ids: &Tensor) -> Result<Tensor> { input_ids .apply(&self.text_model)? .matmul(&self.text_projection) } pub fn get_image_features(&self, pixel_values: &Tensor) -> Result<Tensor> { pixel_values.apply(&self.vision_model) } pub fn forward(&self, pixel_values: &Tensor, input_ids: &Tensor) -> Result<(Tensor, Tensor)> { let image_features = self.get_image_features(pixel_values)?; let text_features = self.get_text_features(input_ids)?; let image_features_normalized = div_l2_norm(&image_features)?; let text_features_normalized = div_l2_norm(&text_features)?; let logits_per_text = text_features_normalized.matmul(&image_features_normalized.t()?)?; let logit_scale = self.logit_scale.exp()?; let logits_per_text = logits_per_text.broadcast_mul(&logit_scale)?; let logits_per_image = logits_per_text.t()?; Ok((logits_per_text, logits_per_image)) } } pub fn div_l2_norm(v: &Tensor) -> Result<Tensor> { let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?; v.broadcast_div(&l2_norm) }
candle/candle-transformers/src/models/mobileclip.rs/0
{ "file_path": "candle/candle-transformers/src/models/mobileclip.rs", "repo_id": "candle", "token_count": 1310 }
45
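`div_l2_norm` in `mobileclip.rs` above rescales every feature row to unit L2 norm, so the subsequent `matmul` between text and image features yields cosine similarities. A small numeric check, reusing the function exactly as defined in the file (a sketch, assuming only the candle core crate):

```rust
use candle::{Device, Result, Tensor, D};

fn div_l2_norm(v: &Tensor) -> Result<Tensor> {
    // Row-wise L2 norm along the last dimension, kept as a broadcastable column.
    let l2_norm = v.sqr()?.sum_keepdim(D::Minus1)?.sqrt()?;
    v.broadcast_div(&l2_norm)
}

fn main() -> Result<()> {
    // The row [3, 4] has L2 norm 5, so the normalized row is [0.6, 0.8].
    let v = Tensor::new(&[[3f32, 4.]], &Device::Cpu)?;
    println!("{}", div_l2_norm(&v)?);
    Ok(())
}
```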
use super::llama2_c::{Cache, Config}; use crate::quantized_nn::{linear_no_bias as linear, Embedding, Linear, RmsNorm}; pub use crate::quantized_var_builder::VarBuilder; use candle::{DType, IndexOp, Module, Result, Tensor, D}; fn silu(xs: &Tensor) -> Result<Tensor> { xs / (xs.neg()?.exp()? + 1.0)? } #[derive(Debug, Clone)] struct CausalSelfAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_head: usize, n_key_value_head: usize, head_dim: usize, } impl CausalSelfAttention { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize, cache: &Cache) -> Result<Tensor> { let (b_sz, seq_len, h, n_embd) = x.dims4()?; let cos = cache.cos.i(index_pos..index_pos + seq_len)?; let sin = cache.sin.i(index_pos..index_pos + seq_len)?; let cos = cos.unsqueeze(1)?; let sin = sin.unsqueeze(1)?; let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?; let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?; let x0 = x.narrow(D::Minus1, 0, 1)?; let x1 = x.narrow(D::Minus1, 1, 1)?; let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?; let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?; Ok(rope) } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.q_proj.forward(x)?; let k = self.k_proj.forward(x)?; let v = self.v_proj.forward(x)?; let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?; let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?; let q = self.apply_rotary_emb(&q, index_pos, cache)?; let mut k = self.apply_rotary_emb(&k, index_pos, cache)?; if cache.use_kv_cache { if let Some((cache_k, cache_v)) = &cache.kvs[block_idx] { k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?; v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?; } cache.kvs[block_idx] = Some((k.clone(), v.clone())) } let k = self.repeat_kv(k)?; let v = self.repeat_kv(v)?; let q = q.transpose(1, 2)?.contiguous()?; let k = k.transpose(1, 2)?.contiguous()?; let v = v.transpose(1, 2)?.contiguous()?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = if seq_len <= 1 { att } else { let mask = cache.mask(seq_len)?.broadcast_as(att.shape())?; masked_fill(&att, &mask, f32::NEG_INFINITY)? }; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let y = att.matmul(&v.contiguous()?)?; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.o_proj.forward(&y)?; Ok(y) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_head / self.n_key_value_head; if n_rep == 1 { Ok(x) } else { let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?; let x = x .unsqueeze(3)? .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))? 
.reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?; Ok(x) } } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let size_in = cfg.dim; let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads; let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads; let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?; let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?; let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?; let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_head: cfg.n_heads, n_key_value_head: cfg.n_kv_heads, head_dim: cfg.dim / cfg.n_heads, }) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct Mlp { c_fc1: Linear, c_fc2: Linear, c_proj: Linear, } impl Mlp { fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self { Self { c_fc1, c_fc2, c_proj, } } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = (silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?; self.c_proj.forward(&x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h_size = cfg.dim; let i_size = cfg.hidden_dim; let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?; let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?; let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?; Ok(Self::new(c_fc1, c_fc2, c_proj)) } } #[derive(Debug, Clone)] struct Block { rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp, } impl Block { fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self { Self { rms_1, attn, rms_2, mlp, } } fn forward( &self, x: &Tensor, index_pos: usize, block_idx: usize, cache: &mut Cache, ) -> Result<Tensor> { let residual = x; let x = self.rms_1.forward(x)?; let x = (self.attn.forward(&x, index_pos, block_idx, cache)? + residual)?; let residual = &x; let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?; Ok(x) } fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let attn = CausalSelfAttention::load(vb.pp("self_attn"), cfg)?; let mlp = Mlp::load(vb.pp("mlp"), cfg)?; let input_layernorm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?; Ok(Self::new( input_layernorm, attn, post_attention_layernorm, mlp, )) } } #[derive(Debug, Clone)] pub struct QLlama { wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear, pub config: Config, } impl QLlama { pub fn forward(&self, x: &Tensor, index_pos: usize, cache: &mut Cache) -> Result<Tensor> { let (_b_sz, _seq_len) = x.dims2()?; let mut x = self.wte.forward(x)?; for (block_idx, block) in self.blocks.iter().enumerate() { x = block.forward(&x, index_pos, block_idx, cache)?; } let x = self.ln_f.forward(&x)?; let logits = self.lm_head.forward(&x)?; logits.to_dtype(DType::F32) } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let wte = Embedding::new(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?; let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?; let ln_f = RmsNorm::new(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?; let blocks: Vec<_> = (0..cfg.n_layers) .map(|i| Block::load(vb.pp(format!("model.layers.{i}")), &cfg).unwrap()) .collect(); Ok(Self { wte, blocks, ln_f, lm_head, config: cfg, }) } }
candle/candle-transformers/src/models/quantized_llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_llama2_c.rs", "repo_id": "candle", "token_count": 4430 }
46
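The hand-rolled `silu` at the top of `quantized_llama2_c.rs` computes `x * sigmoid(x)` in the algebraically equivalent form `x / (1 + e^-x)`. A tiny standalone check of that identity (sketch; only the candle core crate is assumed):

```rust
use candle::{Device, Result, Tensor};

fn silu(xs: &Tensor) -> Result<Tensor> {
    // x / (1 + exp(-x)) == x * sigmoid(x)
    xs / (xs.neg()?.exp()? + 1.0)?
}

fn main() -> Result<()> {
    let xs = Tensor::new(&[-2f32, 0., 2.], &Device::Cpu)?;
    // Expected approximately [-0.2384, 0.0, 1.7616].
    println!("{}", silu(&xs)?);
    Ok(())
}
```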
// This implementation is based on the python version from huggingface/transformers. // https://github.com/huggingface/transformers/blob/b109257f4fb8b1166e7c53cc5418632014ed53a5/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py#L2 use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{linear_b as linear, Linear, VarBuilder}; use std::sync::Arc; #[derive(serde::Deserialize, Debug, Clone, Copy)] #[serde(rename_all = "snake_case")] pub enum TemporalBlockType { Attention, Recurrent, } #[derive(serde::Deserialize, Debug, Clone)] pub struct Config { pub num_hidden_layers: usize, pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub head_dim: usize, pub lru_width: Option<usize>, pub attention_window_size: usize, pub conv1d_width: usize, pub logits_soft_cap: f64, pub hidden_activation: candle_nn::Activation, pub partial_rotary_factor: f64, pub rms_norm_eps: f64, pub rope_theta: f64, #[serde(alias = "_block_types")] pub block_types: Vec<TemporalBlockType>, pub attention_bias: bool, #[serde(default = "default_max_seq_len")] pub max_seq_len: usize, } fn default_max_seq_len() -> usize { 8192 } #[derive(Debug, Clone)] pub(crate) struct RmsNorm { weight: Tensor, eps: f64, } impl RmsNorm { pub(crate) fn new(dim: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let weight = vb.get(dim, "weight")?; Ok(Self { weight, eps }) } pub(crate) fn from_weight(weight: Tensor, eps: f64) -> Self { Self { weight, eps } } } impl Module for RmsNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; let hidden_size = x.dim(D::Minus1)?; let x = x.to_dtype(internal_dtype)?; let norm_x = (x.sqr()?.sum_keepdim(D::Minus1)? / hidden_size as f64)?; let x_normed = x.broadcast_div(&(norm_x + self.eps)?.sqrt()?)?; x_normed .to_dtype(x_dtype)? .broadcast_mul(&(&self.weight + 1.0)?) } } #[derive(Debug, Clone)] pub(crate) struct RotaryEmbedding { sin: Tensor, cos: Tensor, } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } impl RotaryEmbedding { pub(crate) fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { if cfg.partial_rotary_factor != 0.5 { candle::bail!("partial-rotary-factor {} <> 0.5", cfg.partial_rotary_factor) } let dim = cfg.head_dim / 2; let max_seq_len = cfg.max_seq_len; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? 
.reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; let freqs = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } pub(crate) fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let sin = sin.unsqueeze(0)?.unsqueeze(0)?; // (1, 1, seq_len, dim) let q_embed = (q.broadcast_mul(&cos)? + rotate_half(q)?.broadcast_mul(&sin))?; let k_embed = (k.broadcast_mul(&cos)? + rotate_half(k)?.broadcast_mul(&sin))?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] struct Mlp { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: candle_nn::Activation, } impl Mlp { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let intermediate_size = cfg.intermediate_size / 2; let gate_proj = linear(h, intermediate_size, true, vb.pp("gate_proj"))?; let up_proj = linear(h, intermediate_size, true, vb.pp("up_proj"))?; let down_proj = linear(intermediate_size, h, true, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_activation, }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let gate = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; (gate * xs.apply(&self.up_proj))?.apply(&self.down_proj) } } // Real-Gated Linear Recurrent Unit #[derive(Debug, Clone)] pub(crate) struct Rglru { pub(crate) recurrent_param: Tensor, pub(crate) input_gate_weight: Tensor, pub(crate) input_gate_bias: Tensor, pub(crate) recurrent_gate_weight: Tensor, pub(crate) recurrent_gate_bias: Tensor, pub(crate) block_width: usize, pub(crate) n_heads: usize, pub(crate) recurrent_states: Option<Tensor>, } fn baddbmm(a: &Tensor, b: &Tensor, c: &Tensor) -> Result<Tensor> { a.broadcast_add(&b.matmul(c)?) } fn softplus(xs: &Tensor) -> Result<Tensor> { (xs.exp()? + 1.0)?.log() } impl Rglru { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let lru_width = cfg.lru_width.unwrap_or(h); let n_heads = cfg.num_attention_heads; let block_width = lru_width / n_heads; let recurrent_param = vb.get((lru_width,), "recurrent_param")?; let input_gate_weight = vb.get((n_heads, block_width, block_width), "input_gate_weight")?; let input_gate_bias = vb.get((n_heads, block_width), "input_gate_bias")?; let recurrent_gate_weight = vb.get((n_heads, block_width, block_width), "recurrent_gate_weight")?; let recurrent_gate_bias = vb.get((n_heads, block_width), "recurrent_gate_bias")?; Ok(Self { recurrent_param, input_gate_bias, input_gate_weight, recurrent_gate_bias, recurrent_gate_weight, block_width, n_heads, recurrent_states: None, }) } // https://github.com/huggingface/transformers/blob/0bd58f1ce0573c0e3269de4215a17d318add49b9/src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py#L303 pub(crate) fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> { let (b_sz, seq_len, lru_width) = xs.dims3()?; let pos = Tensor::arange(pos as u32, (pos + seq_len) as u32, xs.device())?; let reset = pos.eq(0u32)?.unsqueeze(1)?.unsqueeze(0)?; let reshape_act = xs .reshape((b_sz * seq_len, self.n_heads, self.block_width))? .permute((1, 0, 2))? 
.contiguous()?; let res = baddbmm( &self.input_gate_bias.unsqueeze(1)?, &reshape_act, &self.input_gate_weight, )?; let input_gate = res.transpose(0, 1)?.reshape((b_sz, seq_len, lru_width))?; let input_gate = candle_nn::ops::sigmoid(&input_gate)?; let res = baddbmm( &self.recurrent_gate_bias.unsqueeze(1)?, &reshape_act, &self.recurrent_gate_weight, )?; let recurrent_gate = res.transpose(0, 1)?.reshape((b_sz, seq_len, lru_width))?; let recurrent_gate = candle_nn::ops::sigmoid(&recurrent_gate)?; let log_recurrent_gate = (recurrent_gate * (-8.0))?.broadcast_mul(&softplus(&self.recurrent_param)?)?; let recurrent_gate = log_recurrent_gate.exp()?; let a_square = (log_recurrent_gate * 2.)?.exp()?; // Gate the input. let gated_inputs = (xs * input_gate)?; let reset = reset.to_dtype(a_square.dtype())?; let multiplier = reset.broadcast_add(&((1.0 - &reset)?.broadcast_mul(&(1.0 - a_square)?.sqrt()?))?)?; let normalized_x = (gated_inputs * multiplier.to_dtype(xs.dtype()))?; let (hidden_states, recurrent_states) = rnn_scan( &normalized_x, &recurrent_gate, &reset, self.recurrent_states.as_ref(), )?; self.recurrent_states = Some(recurrent_states); Ok(hidden_states) } } fn rnn_scan( hidden_states: &Tensor, recurrent_gate: &Tensor, reset: &Tensor, recurrent_states: Option<&Tensor>, ) -> Result<(Tensor, Tensor)> { let acc_dtype = DType::F32; let dev = hidden_states.device(); let in_dtype = hidden_states.dtype(); let inv_reset = (1.0 - reset)?.to_dtype(recurrent_gate.dtype())?; let recurrent_gate = recurrent_gate.broadcast_mul(&inv_reset)?; let (c, r) = if hidden_states.dim(1)? == 1 { match recurrent_states { None => { let next_state = hidden_states.i((.., 0))?.to_dtype(acc_dtype)?; (hidden_states.clone(), next_state) } Some(recurrent_states) => { let contextualized_states = recurrent_gate.to_dtype(acc_dtype)? * recurrent_states.unsqueeze(1)?; let contextualized_states = (contextualized_states + hidden_states.to_dtype(acc_dtype)?)?; let c = contextualized_states.to_dtype(in_dtype)?; let l = contextualized_states.dim(1)?; let r = contextualized_states.i((.., l - 1))?; (c, r) } } } else { let mut recurrent_states = match recurrent_states { None => Tensor::zeros(hidden_states.i((.., 0))?.shape(), acc_dtype, dev)?, Some(r) => r.clone(), }; let mut contextualized_states = vec![]; for t in 0..hidden_states.dim(1)? { recurrent_states = (recurrent_gate.i((.., t))?.to_dtype(acc_dtype)? * recurrent_states)?; recurrent_states = (recurrent_states + hidden_states.i((.., t))?.to_dtype(acc_dtype)?)?; contextualized_states.push(recurrent_states.to_dtype(in_dtype)?) 
} let contextualized_states = Tensor::stack(&contextualized_states, 1)?; (contextualized_states, recurrent_states) }; Ok((c, r)) } #[derive(Debug, Clone)] struct RecurrentBlock { linear_y: Linear, linear_x: Linear, linear_out: Linear, conv_1d: candle_nn::Conv1d, conv1d_state: Option<Tensor>, conv1d_width: usize, rg_lru: Rglru, act_fn: candle_nn::Activation, } impl RecurrentBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let lru_width = cfg.lru_width.unwrap_or(h); let linear_y = linear(h, lru_width, true, vb.pp("linear_y"))?; let linear_x = linear(h, lru_width, true, vb.pp("linear_x"))?; let linear_out = linear(lru_width, h, true, vb.pp("linear_out"))?; let conv_1d = candle_nn::conv1d( lru_width, lru_width, cfg.conv1d_width, candle_nn::Conv1dConfig { groups: lru_width, padding: cfg.conv1d_width - 1, ..Default::default() }, vb.pp("conv_1d"), )?; let rg_lru = Rglru::new(cfg, vb.pp("rg_lru"))?; Ok(Self { linear_y, linear_x, linear_out, conv_1d, conv1d_state: None, conv1d_width: cfg.conv1d_width, rg_lru, act_fn: cfg.hidden_activation, }) } pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> { let (_b_sz, seq_len, _) = xs.dims3()?; let y_branch = xs.apply(&self.linear_y)?.apply(&self.act_fn)?; let x_branch = xs.apply(&self.linear_x)?.transpose(1, 2)?; let x_branch = if pos == 0 { let x_len = x_branch.dim(D::Minus1)?; let pad = self.conv1d_width as i64 - x_len as i64 - 1; let padded = match pad.cmp(&0) { std::cmp::Ordering::Equal => x_branch.clone(), std::cmp::Ordering::Less => { let rev_pad = (-pad) as usize; x_branch.narrow(D::Minus1, rev_pad, x_len - rev_pad)? } std::cmp::Ordering::Greater => { x_branch.pad_with_zeros(D::Minus1, pad as usize, 0)? } }; self.conv1d_state = Some(padded); x_branch .apply(&self.conv_1d)? .narrow(D::Minus1, 0, seq_len)? 
} else { let conv_state = match self.conv1d_state.as_ref() { None => candle::bail!("empty cache despite pos > 0"), Some(s) => Tensor::cat(&[s, &x_branch], D::Minus1)?, }; let w = self.conv_1d.weight().i((.., 0, ..))?; let x_branch = conv_state.broadcast_mul(&w)?.sum(D::Minus1)?; let x_branch = match self.conv_1d.bias() { None => x_branch, Some(b) => x_branch.broadcast_add(b)?, }; let x_branch = x_branch.unsqueeze(D::Minus1)?; self.conv1d_state = Some(conv_state.i((.., .., 1..))?); x_branch }; let x_branch = x_branch.transpose(1, 2)?; let x_branch = self.rg_lru.forward(&x_branch, pos)?; (x_branch * y_branch)?.apply(&self.linear_out) } } #[derive(Debug, Clone)] struct SdpaAttention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, n_heads: usize, n_kv_heads: usize, head_dim: usize, hidden_size: usize, kv_cache: Option<(Tensor, Tensor)>, rotary_emb: Arc<RotaryEmbedding>, } impl SdpaAttention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let h = cfg.hidden_size; let n_heads = cfg.num_attention_heads; let n_kv_heads = cfg.num_key_value_heads; let hd = cfg.head_dim; let q_proj = linear(h, n_heads * hd, cfg.attention_bias, vb.pp("q_proj"))?; let k_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("k_proj"))?; let v_proj = linear(h, n_kv_heads * hd, cfg.attention_bias, vb.pp("v_proj"))?; let o_proj = linear(n_heads * hd, h, true, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, n_heads, n_kv_heads, head_dim: hd, hidden_size: h, kv_cache: None, rotary_emb, }) } fn repeat_kv(&self, x: Tensor) -> Result<Tensor> { let n_rep = self.n_heads / self.n_kv_heads; crate::utils::repeat_kv(x, n_rep) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { let (bsz, q_len, _) = xs.dims3()?; let query_states = xs.apply(&self.q_proj)?; let key_states = xs.apply(&self.k_proj)?; let value_states = xs.apply(&self.v_proj)?; let query_states = query_states .reshape((bsz, q_len, self.n_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((bsz, q_len, self.n_kv_heads, self.head_dim))? .transpose(1, 2)?; let query_states = query_states.chunk(2, D::Minus1)?; let key_states = key_states.chunk(2, D::Minus1)?; let (query_rot, key_rot) = self.rotary_emb .apply_rotary_emb_qkv(&query_states[0], &key_states[0], pos)?; let query_states = Tensor::cat(&[&query_rot, &query_states[1]], D::Minus1)?.contiguous()?; let key_states = Tensor::cat(&[&key_rot, &key_states[1]], D::Minus1)?.contiguous()?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = self.repeat_kv(key_states)?; let value_states = self.repeat_kv(value_states)?; let xs = { let att = (query_states.matmul(&key_states.t()?)? / (self.head_dim as f64).sqrt())?; let att = if q_len == 1 { att } else { match attention_mask { None => att, Some(mask) => att.broadcast_add(mask)?, } }; let att = candle_nn::ops::softmax_last_dim(&att)?; att.matmul(&value_states.contiguous()?)? }; let xs = xs .transpose(1, 2)? 
.reshape((bsz, q_len, self.hidden_size))?; self.o_proj.forward(&xs) } } #[derive(Debug, Clone)] enum TemporalBlock { Recurrent(RecurrentBlock), Attention(SdpaAttention), } impl TemporalBlock { fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { match self { Self::Recurrent(b) => b.forward(xs, pos), Self::Attention(b) => b.forward(xs, attention_mask, pos), } } } #[derive(Debug, Clone)] struct DecoderLayer { temporal_pre_norm: RmsNorm, channel_pre_norm: RmsNorm, temporal_block: TemporalBlock, mlp_block: Mlp, } impl DecoderLayer { fn new( block_idx: usize, rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder, ) -> Result<Self> { let h = cfg.hidden_size; let temporal_pre_norm = RmsNorm::new(h, cfg.rms_norm_eps, vb.pp("temporal_pre_norm"))?; let channel_pre_norm = RmsNorm::new(h, cfg.rms_norm_eps, vb.pp("channel_pre_norm"))?; let temporal_block = match cfg.block_types[block_idx % cfg.block_types.len()] { TemporalBlockType::Recurrent => { let block = RecurrentBlock::new(cfg, vb.pp("temporal_block"))?; TemporalBlock::Recurrent(block) } TemporalBlockType::Attention => { let block = SdpaAttention::new(rotary_emb, cfg, vb.pp("temporal_block"))?; TemporalBlock::Attention(block) } }; let mlp_block = Mlp::new(cfg, vb.pp("mlp_block"))?; Ok(Self { temporal_pre_norm, channel_pre_norm, temporal_block, mlp_block, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, pos: usize, ) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.temporal_pre_norm)?; let xs = self.temporal_block.forward(&xs, attention_mask, pos)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.channel_pre_norm)?.apply(&self.mlp_block)?; xs + residual } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, final_norm: RmsNorm, lm_head: Linear, hidden_size: usize, logits_soft_cap: f64, dtype: DType, device: Device, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb.device())?); let vb_b = vb.pp("layers"); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); for idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(idx, rotary_emb.clone(), cfg, vb_b.pp(idx))?; layers.push(layer) } let final_norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("final_norm"))?; let lm_head = Linear::new(embed_tokens.embeddings().clone(), None); Ok(Self { embed_tokens, layers, final_norm, lm_head, hidden_size: cfg.hidden_size, logits_soft_cap: cfg.logits_soft_cap, dtype: vb.dtype(), device: vb.device().clone(), }) } fn prepare_decoder_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { let mask: Vec<_> = (0..tgt_len) .flat_map(|i| (0..tgt_len).map(move |j| if i < j { f32::NEG_INFINITY } else { 0. })) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), DType::F32, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? 
.to_dtype(self.dtype) } pub fn forward(&mut self, xs: &Tensor, pos: usize) -> Result<Tensor> { let (b_size, seq_len) = xs.dims2()?; let attention_mask = if seq_len <= 1 { None } else { let mask = self.prepare_decoder_attention_mask(b_size, seq_len, pos)?; Some(mask) }; let xs = xs.apply(&self.embed_tokens)?; let mut xs = (xs * (self.hidden_size as f64).sqrt())?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), pos)?; } let logits = xs .narrow(1, seq_len - 1, 1)? .apply(&self.final_norm)? .apply(&self.lm_head)?; let logits = ((logits / self.logits_soft_cap)?.tanh()? * self.logits_soft_cap)?; Ok(logits) } }
candle/candle-transformers/src/models/recurrent_gemma.rs/0
{ "file_path": "candle/candle-transformers/src/models/recurrent_gemma.rs", "repo_id": "candle", "token_count": 11854 }
47
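`rotate_half` in `recurrent_gemma.rs` above is the half-rotation used by the rotary embedding: it swaps the two halves of the last dimension and negates the half that moves to the front. On a concrete vector (a sketch, reusing the function as written in the file):

```rust
use candle::{Device, Result, Tensor, D};

fn rotate_half(xs: &Tensor) -> Result<Tensor> {
    let last_dim = xs.dim(D::Minus1)?;
    let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?;
    let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?;
    Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1)
}

fn main() -> Result<()> {
    let xs = Tensor::new(&[1f32, 2., 3., 4.], &Device::Cpu)?;
    // [1, 2, 3, 4] -> [-3, -4, 1, 2]
    println!("{}", rotate_half(&xs)?);
    Ok(())
}
```

Combined with the elementwise `cos`/`sin` products in `apply_rotary_emb_qkv`, this reproduces the usual complex-plane rotation of each (x_i, x_{i+d/2}) pair.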
use super::schedulers::{betas_for_alpha_bar, BetaSchedule, PredictionType}; use candle::{Result, Tensor}; #[derive(Debug, Clone, PartialEq, Eq)] pub enum DDPMVarianceType { FixedSmall, FixedSmallLog, FixedLarge, FixedLargeLog, Learned, } impl Default for DDPMVarianceType { fn default() -> Self { Self::FixedSmall } } #[derive(Debug, Clone)] pub struct DDPMSchedulerConfig { /// The value of beta at the beginning of training. pub beta_start: f64, /// The value of beta at the end of training. pub beta_end: f64, /// How beta evolved during training. pub beta_schedule: BetaSchedule, /// Option to predicted sample between -1 and 1 for numerical stability. pub clip_sample: bool, /// Option to clip the variance used when adding noise to the denoised sample. pub variance_type: DDPMVarianceType, /// prediction type of the scheduler function pub prediction_type: PredictionType, /// number of diffusion steps used to train the model. pub train_timesteps: usize, } impl Default for DDPMSchedulerConfig { fn default() -> Self { Self { beta_start: 0.00085, beta_end: 0.012, beta_schedule: BetaSchedule::ScaledLinear, clip_sample: false, variance_type: DDPMVarianceType::FixedSmall, prediction_type: PredictionType::Epsilon, train_timesteps: 1000, } } } pub struct DDPMScheduler { alphas_cumprod: Vec<f64>, init_noise_sigma: f64, timesteps: Vec<usize>, step_ratio: usize, pub config: DDPMSchedulerConfig, } impl DDPMScheduler { pub fn new(inference_steps: usize, config: DDPMSchedulerConfig) -> Result<Self> { let betas = match config.beta_schedule { BetaSchedule::ScaledLinear => super::utils::linspace( config.beta_start.sqrt(), config.beta_end.sqrt(), config.train_timesteps, )? .sqr()?, BetaSchedule::Linear => { super::utils::linspace(config.beta_start, config.beta_end, config.train_timesteps)? } BetaSchedule::SquaredcosCapV2 => betas_for_alpha_bar(config.train_timesteps, 0.999)?, }; let betas = betas.to_vec1::<f64>()?; let mut alphas_cumprod = Vec::with_capacity(betas.len()); for &beta in betas.iter() { let alpha = 1.0 - beta; alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64)) } // min(train_timesteps, inference_steps) // https://github.com/huggingface/diffusers/blob/8331da46837be40f96fbd24de6a6fb2da28acd11/src/diffusers/schedulers/scheduling_ddpm.py#L187 let inference_steps = inference_steps.min(config.train_timesteps); // arange the number of the scheduler's timesteps let step_ratio = config.train_timesteps / inference_steps; let timesteps: Vec<usize> = (0..inference_steps).map(|s| s * step_ratio).rev().collect(); Ok(Self { alphas_cumprod, init_noise_sigma: 1.0, timesteps, step_ratio, config, }) } fn get_variance(&self, timestep: usize) -> f64 { let prev_t = timestep as isize - self.step_ratio as isize; let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = if prev_t >= 0 { self.alphas_cumprod[prev_t as usize] } else { 1.0 }; let current_beta_t = 1. - alpha_prod_t / alpha_prod_t_prev; // For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) // and sample from it to get previous sample // x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample let variance = (1. - alpha_prod_t_prev) / (1. 
- alpha_prod_t) * current_beta_t; // retrieve variance match self.config.variance_type { DDPMVarianceType::FixedSmall => variance.max(1e-20), // for rl-diffuser https://arxiv.org/abs/2205.09991 DDPMVarianceType::FixedSmallLog => { let variance = variance.max(1e-20).ln(); (variance * 0.5).exp() } DDPMVarianceType::FixedLarge => current_beta_t, DDPMVarianceType::FixedLargeLog => current_beta_t.ln(), DDPMVarianceType::Learned => variance, } } pub fn timesteps(&self) -> &[usize] { self.timesteps.as_slice() } /// Ensures interchangeability with schedulers that need to scale the denoising model input /// depending on the current timestep. pub fn scale_model_input(&self, sample: Tensor, _timestep: usize) -> Tensor { sample } pub fn step(&self, model_output: &Tensor, timestep: usize, sample: &Tensor) -> Result<Tensor> { let prev_t = timestep as isize - self.step_ratio as isize; // https://github.com/huggingface/diffusers/blob/df2b548e893ccb8a888467c2508756680df22821/src/diffusers/schedulers/scheduling_ddpm.py#L272 // 1. compute alphas, betas let alpha_prod_t = self.alphas_cumprod[timestep]; let alpha_prod_t_prev = if prev_t >= 0 { self.alphas_cumprod[prev_t as usize] } else { 1.0 }; let beta_prod_t = 1. - alpha_prod_t; let beta_prod_t_prev = 1. - alpha_prod_t_prev; let current_alpha_t = alpha_prod_t / alpha_prod_t_prev; let current_beta_t = 1. - current_alpha_t; // 2. compute predicted original sample from predicted noise also called "predicted x_0" of formula (15) let mut pred_original_sample = match self.config.prediction_type { PredictionType::Epsilon => { ((sample - model_output * beta_prod_t.sqrt())? / alpha_prod_t.sqrt())? } PredictionType::Sample => model_output.clone(), PredictionType::VPrediction => { ((sample * alpha_prod_t.sqrt())? - model_output * beta_prod_t.sqrt())? } }; // 3. clip predicted x_0 if self.config.clip_sample { pred_original_sample = pred_original_sample.clamp(-1f32, 1f32)?; } // 4. Compute coefficients for pred_original_sample x_0 and current sample x_t // See formula (7) from https://arxiv.org/pdf/2006.11239.pdf let pred_original_sample_coeff = (alpha_prod_t_prev.sqrt() * current_beta_t) / beta_prod_t; let current_sample_coeff = current_alpha_t.sqrt() * beta_prod_t_prev / beta_prod_t; // 5. Compute predicted previous sample µ_t // See formula (7) from https://arxiv.org/pdf/2006.11239.pdf let pred_prev_sample = ((&pred_original_sample * pred_original_sample_coeff)? + sample * current_sample_coeff)?; // https://github.com/huggingface/diffusers/blob/df2b548e893ccb8a888467c2508756680df22821/src/diffusers/schedulers/scheduling_ddpm.py#L305 // 6. Add noise let mut variance = model_output.zeros_like()?; if timestep > 0 { let variance_noise = model_output.randn_like(0., 1.)?; if self.config.variance_type == DDPMVarianceType::FixedSmallLog { variance = (variance_noise * self.get_variance(timestep))?; } else { variance = (variance_noise * self.get_variance(timestep).sqrt())?; } } &pred_prev_sample + variance } pub fn add_noise( &self, original_samples: &Tensor, noise: Tensor, timestep: usize, ) -> Result<Tensor> { (original_samples * self.alphas_cumprod[timestep].sqrt())? + noise * (1. - self.alphas_cumprod[timestep]).sqrt() } pub fn init_noise_sigma(&self) -> f64 { self.init_noise_sigma } }
candle/candle-transformers/src/models/stable_diffusion/ddpm.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/ddpm.rs", "repo_id": "candle", "token_count": 3662 }
48
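Two small pieces of the scheduler above are easy to sanity-check with plain arithmetic: the cumulative product of (1 − β) built incrementally from the betas, and the reversed, strided timestep grid. A dependency-free Rust sketch (toy numbers, purely illustrative):

```rust
fn main() {
    // alphas_cumprod: running product of (1 - beta), starting from 1.0,
    // mirroring the loop in DDPMScheduler::new.
    let betas = [0.1f64, 0.2, 0.3];
    let mut alphas_cumprod: Vec<f64> = Vec::with_capacity(betas.len());
    for &beta in betas.iter() {
        let alpha = 1.0 - beta;
        alphas_cumprod.push(alpha * *alphas_cumprod.last().unwrap_or(&1f64));
    }
    // Prints approximately [0.9, 0.72, 0.504].
    println!("{alphas_cumprod:?}");

    // Timesteps: 1000 training steps downsampled to 10 inference steps, reversed.
    let (train_timesteps, inference_steps) = (1000usize, 10usize);
    let step_ratio = train_timesteps / inference_steps;
    let timesteps: Vec<usize> = (0..inference_steps).map(|s| s * step_ratio).rev().collect();
    assert_eq!(timesteps, vec![900, 800, 700, 600, 500, 400, 300, 200, 100, 0]);
}
```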
use crate::models::with_tracing::QMatMul; use crate::quantized_var_builder::VarBuilder; use candle::quantized::QTensor; use candle::{Module, Result, Tensor}; #[derive(Debug, Clone)] pub struct Embedding { inner: candle_nn::Embedding, span: tracing::Span, } impl Embedding { pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> { let embeddings = vb.get((d1, d2), "weight")?.dequantize(vb.device())?; let inner = candle_nn::Embedding::new(embeddings, d2); let span = tracing::span!(tracing::Level::TRACE, "embedding"); Ok(Self { inner, span }) } pub fn embeddings(&self) -> &Tensor { self.inner.embeddings() } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] pub struct Linear { weight: QMatMul, bias: Option<Tensor>, } impl Linear { pub fn from_arc(weight: std::sync::Arc<QTensor>, bias: Option<Tensor>) -> Result<Self> { let weight = QMatMul::from_weights(weight)?; Ok(Self { weight, bias }) } pub fn from_weights(weight: QMatMul, bias: Option<Tensor>) -> Self { Self { weight, bias } } } impl Module for Linear { fn forward(&self, x: &Tensor) -> candle::Result<Tensor> { let x = x.apply(&self.weight)?; match &self.bias { None => Ok(x), Some(bias) => x.broadcast_add(bias), } } } pub fn linear_b(in_dim: usize, out_dim: usize, bias: bool, vb: VarBuilder) -> Result<Linear> { let bias = if bias { Some(vb.get(out_dim, "bias")?.dequantize(vb.device())?) } else { None }; let weight = QMatMul::new(in_dim, out_dim, vb)?; Ok(Linear { weight, bias }) } pub fn linear(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> { let bias = vb.get(out_dim, "bias")?.dequantize(vb.device())?; let weight = QMatMul::new(in_dim, out_dim, vb)?; Ok(Linear { weight, bias: Some(bias), }) } pub fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> { let weight = vb.get(size, "weight")?.dequantize(vb.device())?; let bias = vb.get(size, "bias")?.dequantize(vb.device())?; Ok(candle_nn::LayerNorm::new(weight, bias, eps)) } pub fn layer_norm_no_bias(size: usize, eps: f64, vb: VarBuilder) -> Result<candle_nn::LayerNorm> { let weight = vb.get(size, "weight")?.dequantize(vb.device())?; Ok(candle_nn::LayerNorm::new_no_bias(weight, eps)) } pub fn linear_no_bias(in_dim: usize, out_dim: usize, vb: VarBuilder) -> Result<Linear> { let weight = QMatMul::new(in_dim, out_dim, vb)?; Ok(Linear { weight, bias: None }) } #[derive(Debug, Clone)] pub struct RmsNorm { weight: Tensor, eps: f64, span: tracing::Span, } impl RmsNorm { pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "rms-norm"); let weight = vb.get(size, "weight")?.dequantize(vb.device())?; Ok(Self { weight, eps, span }) } pub fn from_qtensor(weight: QTensor, eps: f64) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "rms-norm"); let weight = weight.dequantize(&weight.device())?; Ok(Self { weight, eps, span }) } } impl Module for RmsNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); candle_nn::ops::rms_norm(x, &self.weight, self.eps as f32) } }
candle/candle-transformers/src/quantized_nn.rs/0
{ "file_path": "candle/candle-transformers/src/quantized_nn.rs", "repo_id": "candle", "token_count": 1623 }
49
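`RmsNorm::forward` above dequantizes the weight once at load time and then defers to `candle_nn::ops::rms_norm`, which divides each row by its root mean square before applying the learned scale. A minimal dense-tensor sketch of that primitive (assumes the candle core and `candle-nn` crates; no quantized weights are needed for the demo):

```rust
use candle::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    let x = Tensor::new(&[[1f32, 2., 3., 4.]], &dev)?;
    // With a unit weight vector this is pure RMS normalization.
    let weight = Tensor::ones(4, DType::F32, &dev)?;
    let y = candle_nn::ops::rms_norm(&x, &weight, 1e-5)?;
    // Each element becomes x_i / rms(x); rms([1, 2, 3, 4]) = sqrt(7.5) ~ 2.7386.
    println!("{y}");
    Ok(())
}
```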
//load the candle Whisper decoder wasm module import init, { Decoder } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "whisper-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Whisper { static instance = {}; // Retrieve the Whisper model. When called for the first time, // this will load the model and save it for future use. static async getInstance(params) { const { weightsURL, modelID, tokenizerURL, mel_filtersURL, configURL, quantized, is_multilingual, timestamps, task, language, } = params; // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [ weightsArrayU8, tokenizerArrayU8, mel_filtersArrayU8, configArrayU8, ] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(mel_filtersURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Decoder( weightsArrayU8, tokenizerArrayU8, mel_filtersArrayU8, configArrayU8, quantized, is_multilingual, timestamps, task, language ); } else { self.postMessage({ status: "loading", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, modelID, tokenizerURL, configURL, mel_filtersURL, audioURL, } = event.data; try { self.postMessage({ status: "decoding", message: "Starting Decoder" }); let quantized = false; if (modelID.includes("quantized")) { quantized = true; } let is_multilingual = false; if (modelID.includes("multilingual")) { is_multilingual = true; } let timestamps = true; const decoder = await Whisper.getInstance({ weightsURL, modelID, tokenizerURL, mel_filtersURL, configURL, quantized, is_multilingual, timestamps, task: null, language: null, }); self.postMessage({ status: "decoding", message: "Loading Audio" }); const audioArrayU8 = await fetchArrayBuffer(audioURL); self.postMessage({ status: "decoding", message: "Running Decoder..." }); const segments = decoder.decode(audioArrayU8); // Send the segment back to the main thread as JSON self.postMessage({ status: "complete", message: "complete", output: JSON.parse(segments), }); } catch (e) { self.postMessage({ error: e }); } });
candle/candle-wasm-examples/whisper/whisperWorker.js/0
{ "file_path": "candle/candle-wasm-examples/whisper/whisperWorker.js", "repo_id": "candle", "token_count": 1215 }
50
Run the tests with:

```bash
RUST_LOG=wasm_bindgen_test_runner wasm-pack test --chrome --headless
```

Or:

```bash
wasm-pack test --chrome
```

If you get an "invalid session id" failure in headless mode, check the logs; it may well be that your ChromeDriver version does not match your browser version.
candle/candle-wasm-tests/README.md/0
{ "file_path": "candle/candle-wasm-tests/README.md", "repo_id": "candle", "token_count": 98 }
51
{{- define "name" -}} {{- default $.Release.Name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- define "app.name" -}} chat-ui {{- end -}} {{- define "labels.standard" -}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} chart: "{{ include "name" . }}" app: "{{ include "app.name" . }}" {{- end -}} {{- define "labels.resolver" -}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} chart: "{{ include "name" . }}" app: "{{ include "app.name" . }}-resolver" {{- end -}}
chat-ui/chart/templates/_helpers.tpl/0
{ "file_path": "chat-ui/chart/templates/_helpers.tpl", "repo_id": "chat-ui", "token_count": 202 }
52
# Anthropic

| Feature                     | Available |
| --------------------------- | --------- |
| [Tools](../tools)           | No        |
| [Multimodal](../multimodal) | Yes       |

We also support Anthropic models (including multimodal ones via `multimodal: true`) through the official SDK. You may provide your API key via the `ANTHROPIC_API_KEY` env variable, or alternatively, through the `endpoints.apiKey` field as per the following example.

```ini
MODELS=`[
  {
    "name": "claude-3-haiku-20240307",
    "displayName": "Claude 3 Haiku",
    "description": "Fastest and most compact model for near-instant responsiveness",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-sonnet-20240229",
    "displayName": "Claude 3 Sonnet",
    "description": "Ideal balance of intelligence and speed",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-opus-20240229",
    "displayName": "Claude 3 Opus",
    "description": "Most powerful model for highly complex tasks",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  }
]`
```

## VertexAI

We also support using Anthropic models running on Vertex AI. Authentication is done using Google Application Default Credentials. The project ID can be provided through the `endpoints.projectId` field as per the following example:

```ini
MODELS=`[
  {
    "name": "claude-3-haiku@20240307",
    "displayName": "Claude 3 Haiku",
    "description": "Fastest, most compact model for near-instant responsiveness",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic-vertex",
        "region": "us-central1",
        "projectId": "gcp-project-id",
        // optionals
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-sonnet@20240229",
    "displayName": "Claude 3 Sonnet",
    "description": "Ideal balance of intelligence and speed",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic-vertex",
        "region": "us-central1",
        "projectId": "gcp-project-id",
        // optionals
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  }
]`
```
chat-ui/docs/source/configuration/models/providers/anthropic.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/anthropic.md", "repo_id": "chat-ui", "token_count": 1541 }
53
# Copy HuggingChat

The config file for HuggingChat is stored in the `chart/env/prod.yaml` file. It is the source of truth for the environment variables used for our CI/CD pipeline. For HuggingChat, as we need to customize the app color, as well as the base path, we build a custom Docker image. You can find the workflow here.

<Tip>

If you want to make changes to the model config used in production for HuggingChat, you should do so against `chart/env/prod.yaml`.

</Tip>

### Running a copy of HuggingChat locally

If you want to run an exact copy of HuggingChat locally, you will need to do the following first:

1. Create an [OAuth App on the hub](https://huggingface.co/settings/applications/new) with `openid profile email` permissions. Make sure to set the callback URL to something like `http://localhost:5173/chat/login/callback`, which matches the right path for your local instance.
2. Create a [HF Token](https://huggingface.co/settings/tokens) with your Hugging Face account. You will need a Pro account to be able to access some of the larger models available through HuggingChat.
3. Create a free account with [serper.dev](https://serper.dev/) (you will get 2500 free search queries).
4. Run an instance of MongoDB, however you want (local or remote).

You can then create a new `.env.SECRET_CONFIG` file with the following content:

```ini
MONGODB_URL=<link to your mongo DB from step 4>
HF_TOKEN=<your HF token from step 2>
OPENID_CONFIG=`{
  PROVIDER_URL: "https://huggingface.co",
  CLIENT_ID: "<your client ID from step 1>",
  CLIENT_SECRET: "<your client secret from step 1>",
}`
SERPER_API_KEY=<your serper API key from step 3>
MESSAGES_BEFORE_LOGIN=<can be any numerical value, or set to 0 to require login>
```

You can then run `npm run updateLocalEnv` in the root of chat-ui. This will create a `.env.local` file which combines the `chart/env/prod.yaml` and the `.env.SECRET_CONFIG` file.

You can then run `npm run dev` to start your local instance of HuggingChat.

### Populate database

<Tip warning={true}>

The `MONGODB_URL` used for this script will be fetched from `.env.local`. Make sure it's correct! The command runs directly on the database.

</Tip>

You can populate the database with faker data using the `populate` script:

```bash
npm run populate <flags here>
```

At least one flag must be specified; the following flags are available:

- `reset` - resets the database
- `all` - populates all tables
- `users` - populates the users table
- `settings` - populates the settings table for existing users
- `assistants` - populates the assistants table for existing users
- `conversations` - populates the conversations table for existing users

For example, you could use it like so:

```bash
npm run populate reset
```

to clear out the database. Then log in to the app to create your user and run the following command:

```bash
npm run populate users settings assistants conversations
```

to populate the database with fake data, including fake conversations and assistants for your user.
chat-ui/docs/source/developing/copy-huggingchat.md/0
{ "file_path": "chat-ui/docs/source/developing/copy-huggingchat.md", "repo_id": "chat-ui", "token_count": 870 }
54
import { env } from "$env/dynamic/private"; import { env as envPublic } from "$env/dynamic/public"; import type { Handle, HandleServerError } from "@sveltejs/kit"; import { collections } from "$lib/server/database"; import { base } from "$app/paths"; import { findUser, refreshSessionCookie, requiresUser } from "$lib/server/auth"; import { ERROR_MESSAGES } from "$lib/stores/errors"; import { sha256 } from "$lib/utils/sha256"; import { addWeeks } from "date-fns"; import { checkAndRunMigrations } from "$lib/migrations/migrations"; import { building } from "$app/environment"; import { logger } from "$lib/server/logger"; import { AbortedGenerations } from "$lib/server/abortedGenerations"; import { MetricsServer } from "$lib/server/metrics"; import { initExitHandler } from "$lib/server/exitHandler"; import { ObjectId } from "mongodb"; import { refreshAssistantsCounts } from "$lib/jobs/refresh-assistants-counts"; import { refreshConversationStats } from "$lib/jobs/refresh-conversation-stats"; // TODO: move this code on a started server hook, instead of using a "building" flag if (!building) { logger.info("Starting server..."); initExitHandler(); await checkAndRunMigrations(); if (env.ENABLE_ASSISTANTS) { refreshAssistantsCounts(); } refreshConversationStats(); // Init metrics server MetricsServer.getInstance(); // Init AbortedGenerations refresh process AbortedGenerations.getInstance(); } export const handleError: HandleServerError = async ({ error, event, status, message }) => { // handle 404 if (building) { throw error; } if (event.route.id === null) { return { message: `Page ${event.url.pathname} not found`, }; } const errorId = crypto.randomUUID(); logger.error({ locals: event.locals, url: event.request.url, params: event.params, request: event.request, message, error, errorId, status, }); return { message: "An error occurred", errorId, }; }; export const handle: Handle = async ({ event, resolve }) => { logger.debug({ locals: event.locals, url: event.url.pathname, params: event.params, request: event.request, }); if (event.url.pathname.startsWith(`${base}/api/`) && env.EXPOSE_API !== "true") { return new Response("API is disabled", { status: 403 }); } function errorResponse(status: number, message: string) { const sendJson = event.request.headers.get("accept")?.includes("application/json") || event.request.headers.get("content-type")?.includes("application/json"); return new Response(sendJson ? JSON.stringify({ error: message }) : message, { status, headers: { "content-type": sendJson ? "application/json" : "text/plain", }, }); } if (event.url.pathname.startsWith(`${base}/admin/`) || event.url.pathname === `${base}/admin`) { const ADMIN_SECRET = env.ADMIN_API_SECRET || env.PARQUET_EXPORT_SECRET; if (!ADMIN_SECRET) { return errorResponse(500, "Admin API is not configured"); } if (event.request.headers.get("Authorization") !== `Bearer ${ADMIN_SECRET}`) { return errorResponse(401, "Unauthorized"); } } const token = event.cookies.get(env.COOKIE_NAME); // if the trusted email header is set we use it to get the user email const email = env.TRUSTED_EMAIL_HEADER ? 
event.request.headers.get(env.TRUSTED_EMAIL_HEADER) : null; let secretSessionId: string | null = null; let sessionId: string | null = null; if (email) { secretSessionId = sessionId = await sha256(email); event.locals.user = { // generate id based on email _id: new ObjectId(sessionId.slice(0, 24)), name: email, email, createdAt: new Date(), updatedAt: new Date(), hfUserId: email, avatarUrl: "", logoutDisabled: true, }; } else if (token) { secretSessionId = token; sessionId = await sha256(token); const user = await findUser(sessionId); if (user) { event.locals.user = user; } } else if (event.url.pathname.startsWith(`${base}/api/`) && env.USE_HF_TOKEN_IN_API === "true") { // if the request goes to the API and no user is available in the header // check if a bearer token is available in the Authorization header const authorization = event.request.headers.get("Authorization"); if (authorization && authorization.startsWith("Bearer ")) { const token = authorization.slice(7); const hash = await sha256(token); sessionId = secretSessionId = hash; // check if the hash is in the DB and get the user // else check against https://huggingface.co/api/whoami-v2 const cacheHit = await collections.tokenCaches.findOne({ tokenHash: hash }); if (cacheHit) { const user = await collections.users.findOne({ hfUserId: cacheHit.userId }); if (!user) { return errorResponse(500, "User not found"); } event.locals.user = user; } else { const response = await fetch("https://huggingface.co/api/whoami-v2", { headers: { Authorization: `Bearer ${token}`, }, }); if (!response.ok) { return errorResponse(401, "Unauthorized"); } const data = await response.json(); const user = await collections.users.findOne({ hfUserId: data.id }); if (!user) { return errorResponse(500, "User not found"); } await collections.tokenCaches.insertOne({ tokenHash: hash, userId: data.id, createdAt: new Date(), updatedAt: new Date(), }); event.locals.user = user; } } } if (!sessionId || !secretSessionId) { secretSessionId = crypto.randomUUID(); sessionId = await sha256(secretSessionId); if (await collections.sessions.findOne({ sessionId })) { return errorResponse(500, "Session ID collision"); } } event.locals.sessionId = sessionId; // CSRF protection const requestContentType = event.request.headers.get("content-type")?.split(";")[0] ?? ""; /** https://developer.mozilla.org/en-US/docs/Web/HTML/Element/form#attr-enctype */ const nativeFormContentTypes = [ "multipart/form-data", "application/x-www-form-urlencoded", "text/plain", ]; if (event.request.method === "POST") { refreshSessionCookie(event.cookies, event.locals.sessionId); if (nativeFormContentTypes.includes(requestContentType)) { const origin = event.request.headers.get("origin"); if (!origin) { return errorResponse(403, "Non-JSON form requests need to have an origin"); } const validOrigins = [ new URL(event.request.url).host, ...(envPublic.PUBLIC_ORIGIN ? 
[new URL(envPublic.PUBLIC_ORIGIN).host] : []), ]; if (!validOrigins.includes(new URL(origin).host)) { return errorResponse(403, "Invalid referer for POST request"); } } } if (event.request.method === "POST") { // if the request is a POST request we refresh the cookie refreshSessionCookie(event.cookies, secretSessionId); await collections.sessions.updateOne( { sessionId }, { $set: { updatedAt: new Date(), expiresAt: addWeeks(new Date(), 2) } } ); } if ( !event.url.pathname.startsWith(`${base}/login`) && !event.url.pathname.startsWith(`${base}/admin`) && !["GET", "OPTIONS", "HEAD"].includes(event.request.method) ) { if ( !event.locals.user && requiresUser && !((env.MESSAGES_BEFORE_LOGIN ? parseInt(env.MESSAGES_BEFORE_LOGIN) : 0) > 0) ) { return errorResponse(401, ERROR_MESSAGES.authOnly); } // if login is not required and the call is not from /settings and we display the ethics modal with PUBLIC_APP_DISCLAIMER // we check if the user has accepted the ethics modal first. // If login is required, `ethicsModalAcceptedAt` is already true at this point, so do not pass this condition. This saves a DB call. if ( !requiresUser && !event.url.pathname.startsWith(`${base}/settings`) && envPublic.PUBLIC_APP_DISCLAIMER === "1" ) { const hasAcceptedEthicsModal = await collections.settings.countDocuments({ sessionId: event.locals.sessionId, ethicsModalAcceptedAt: { $exists: true }, }); if (!hasAcceptedEthicsModal) { return errorResponse(405, "You need to accept the welcome modal first"); } } } let replaced = false; const response = await resolve(event, { transformPageChunk: (chunk) => { // For some reason, Sveltekit doesn't let us load env variables from .env in the app.html template if (replaced || !chunk.html.includes("%gaId%")) { return chunk.html; } replaced = true; return chunk.html.replace("%gaId%", envPublic.PUBLIC_GOOGLE_ANALYTICS_ID); }, }); return response; };
chat-ui/src/hooks.server.ts/0
{ "file_path": "chat-ui/src/hooks.server.ts", "repo_id": "chat-ui", "token_count": 3088 }
55
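A detail worth noting in the hook above: the client-facing cookie (or bearer token) holds the secret value, while the database only ever stores its SHA-256 hash, which doubles as the lookup key (`sessionId`, `tokenHash`). For illustration only, here is the same derivation as a standalone Rust sketch; the `sha2` crate and the literal UUID are stand-ins, not part of chat-ui, which does this in TypeScript via its `sha256` helper:

```rust
use sha2::{Digest, Sha256};

fn main() {
    // The cookie keeps this secret; the DB keeps only the hash below.
    let secret_session_id = "00000000-0000-4000-8000-000000000000"; // hypothetical UUID
    let digest = Sha256::digest(secret_session_id.as_bytes());
    let session_id: String = digest.iter().map(|b| format!("{b:02x}")).collect();
    println!("sessionId = {session_id}");
}
```

Stealing the database alone therefore does not reveal valid session tokens, since the stored hash cannot be inverted back to the cookie value.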
<script lang="ts"> import CarbonEarth from "~icons/carbon/earth"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import BIMeta from "~icons/bi/meta"; import type { Model } from "$lib/types/Model"; export let model: Pick<Model, "name" | "datasetName" | "websiteUrl" | "modelUrl" | "datasetUrl">; export let variant: "light" | "dark" = "light"; </script> <div class="flex items-center gap-5 rounded-xl bg-gray-100 px-3 py-2 text-xs sm:text-sm {variant === 'dark' ? 'text-gray-600 dark:bg-gray-800 dark:text-gray-300' : 'text-gray-800 dark:bg-gray-100 dark:text-gray-600'}" > <a href={model.modelUrl || "https://huggingface.co/" + model.name} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Model <div class="max-sm:hidden">&nbsp;page</div></a > {#if model.datasetName || model.datasetUrl} <a href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName} target="_blank" rel="noreferrer" class="flex items-center hover:underline" ><CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs text-gray-400" /> Dataset <div class="max-sm:hidden">&nbsp;page</div></a > {/if} {#if model.websiteUrl} <a href={model.websiteUrl} target="_blank" class="ml-auto flex items-center hover:underline" rel="noreferrer" > {#if model.name.startsWith("meta-llama/Meta-Llama")} <BIMeta class="mr-1.5 shrink-0 text-xs text-gray-400" /> Built with Llama {:else} <CarbonEarth class="mr-1.5 shrink-0 text-xs text-gray-400" /> Website {/if} </a> {/if} </div>
chat-ui/src/lib/components/ModelCardMetadata.svelte/0
{ "file_path": "chat-ui/src/lib/components/ModelCardMetadata.svelte", "repo_id": "chat-ui", "token_count": 720 }
56
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/stores"; import { clickOutside } from "$lib/actions/clickOutside"; import { useSettingsStore } from "$lib/stores/settings"; import type { ToolFront } from "$lib/types/Tool"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import IconTool from "./icons/IconTool.svelte"; import CarbonInformation from "~icons/carbon/information"; import CarbonGlobe from "~icons/carbon/earth-filled"; export let loading = false; const settings = useSettingsStore(); let detailsEl: HTMLDetailsElement; // active tools are all the checked tools, either from settings or on by default $: activeToolCount = $page.data.tools.filter( (tool: ToolFront) => // community tools are always on by default tool.type === "community" || $settings?.tools?.includes(tool._id) ).length; async function setAllTools(value: boolean) { const configToolsIds = $page.data.tools .filter((t: ToolFront) => t.type === "config") .map((t: ToolFront) => t._id); if (value) { await settings.instantSet({ tools: Array.from(new Set([...configToolsIds, ...($settings?.tools ?? [])])), }); } else { await settings.instantSet({ tools: [], }); } } $: allToolsEnabled = activeToolCount === $page.data.tools.length; $: tools = $page.data.tools; </script> <details class="group relative bottom-0 h-full min-h-8" bind:this={detailsEl} use:clickOutside={() => { if (detailsEl.hasAttribute("open")) { detailsEl.removeAttribute("open"); } }} > <summary class="absolute bottom-0 flex h-8 cursor-pointer select-none items-center gap-1 rounded-lg border bg-white px-2 py-1.5 shadow-sm hover:shadow-none dark:border-gray-800 dark:bg-gray-900" > <IconTool classNames="dark:text-purple-600" /> Tools <span class="text-gray-400 dark:text-gray-500"> ({activeToolCount}) </span> </summary> <div class="absolute bottom-10 h-max w-max select-none items-center gap-1 rounded-lg border bg-white p-0.5 shadow-sm dark:border-gray-800 dark:bg-gray-900" > <div class="grid grid-cols-2 gap-x-6 gap-y-1 p-3"> <div class="col-span-2 flex items-center gap-1.5 text-sm text-gray-500"> Available tools {#if isHuggingChat} <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/470" target="_blank" class="hover:brightness-0 dark:hover:brightness-200" ><CarbonInformation class="text-xs" /></a > {/if} <button class="ml-auto text-xs underline" on:click|stopPropagation={() => setAllTools(!allToolsEnabled)} > {#if allToolsEnabled} Disable all {:else} Enable all {/if} </button> </div> <!-- XXX: feature_flag_tools --> {#if $page.data.user?.isEarlyAccess} <a href="{base}/tools" class="col-span-2 my-1 h-fit w-fit items-center justify-center rounded-full bg-purple-500/20 px-2.5 py-1.5 text-sm hover:bg-purple-500/30" > <span class="mr-1 rounded-full bg-purple-700 px-1.5 py-1 text-xs font-bold uppercase"> new </span> Browse community tools ({$page.data.communityToolCount ?? 0}) </a> {/if} {#each tools as tool} {@const isChecked = $settings?.tools?.includes(tool._id)} <div class="flex items-center gap-1.5"> {#if tool.type === "community"} <input type="checkbox" id={tool._id} checked={true} class="rounded-xs font-semibold accent-purple-500 hover:accent-purple-600" on:click|stopPropagation|preventDefault={async () => { await settings.instantSet({ tools: $settings?.tools?.filter((t) => t !== tool._id) ?? [], }); }} /> {:else} <input type="checkbox" id={tool._id} checked={isChecked} disabled={loading} on:click|stopPropagation={async () => { if (isChecked) { await settings.instantSet({ tools: ($settings?.tools ?? 
[]).filter((t) => t !== tool._id), }); } else { await settings.instantSet({ tools: [...($settings?.tools ?? []), tool._id], }); } }} /> {/if} <label class="cursor-pointer" for={tool._id}>{tool.displayName}</label> {#if tool.type === "community"} <a href="{base}/tools/{tool._id}" class="text-purple-600 hover:text-purple-700"> <CarbonGlobe /> </a> {/if} </div> {/each} </div> </div> </details> <style> details summary::-webkit-details-marker { display: none; } </style>
chat-ui/src/lib/components/ToolsMenu.svelte/0
{ "file_path": "chat-ui/src/lib/components/ToolsMenu.svelte", "repo_id": "chat-ui", "token_count": 1987 }
57
<script lang="ts"> export let classNames = ""; </script> <div class={"inline-flex h-8 flex-none items-center gap-1 " + classNames}> <div class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400" style="animation-delay: 0.25s;" /> <div class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400" style="animation-delay: 0.5s;" /> <div class="h-1 w-1 flex-none animate-bounce rounded-full bg-gray-500 dark:bg-gray-400" style="animation-delay: 0.75s;" /> </div>
chat-ui/src/lib/components/icons/IconLoading.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconLoading.svelte", "repo_id": "chat-ui", "token_count": 223 }
58
import { ObjectId, type WithId } from "mongodb"; import { collections } from "$lib/server/database"; import type { Migration } from "."; import type { Conversation } from "$lib/types/Conversation"; import type { MessageFile } from "$lib/types/Message"; const updateMessageFiles: Migration = { _id: new ObjectId("5f9f5f5f5f5f5f5f5f5f5f5f"), name: "Convert message files to the new schema", up: async () => { const allConversations = collections.conversations.find({}, { projection: { messages: 1 } }); let conversation: WithId<Pick<Conversation, "messages">> | null = null; while ((conversation = await allConversations.tryNext())) { const messages = conversation.messages.map((message) => { const files = (message.files as string[] | undefined)?.map<MessageFile>((file) => { // File is already in the new format if (typeof file !== "string") return file; // File was a hash pointing to a file in the bucket if (file.length === 64) { return { type: "hash", name: "unknown.jpg", value: file, mime: "image/jpeg", }; } // File was a base64 string else { return { type: "base64", name: "unknown.jpg", value: file, mime: "image/jpeg", }; } }); return { ...message, files, }; }); // Set the new messages array await collections.conversations.updateOne({ _id: conversation._id }, { $set: { messages } }); } return true; }, runEveryTime: false, }; export default updateMessageFiles;
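// Worked example of the conversion above (illustrative only — the hash value is shortened
// and the names/mime types follow the hardcoded defaults used by this migration):
//
// "e3b0c44298fc1c14...<64 hex chars total>" -> { type: "hash",   name: "unknown.jpg", value: "<the hash>",   mime: "image/jpeg" }
// "<any other string, e.g. base64 payload>" -> { type: "base64", name: "unknown.jpg", value: "<the string>", mime: "image/jpeg" }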
chat-ui/src/lib/migrations/routines/05-update-message-files.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/05-update-message-files.ts", "repo_id": "chat-ui", "token_count": 618 }
59
import { buildPrompt } from "$lib/buildPrompt"; import { textGenerationStream } from "@huggingface/inference"; import { z } from "zod"; import type { Endpoint } from "../endpoints"; export const endpointAwsParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("aws"), url: z.string().url(), accessKey: z .string({ description: "An AWS Access Key ID. If not provided, the default AWS identity resolution will be used", }) .min(1) .optional(), secretKey: z .string({ description: "An AWS Access Key Secret. If not provided, the default AWS identity resolution will be used", }) .min(1) .optional(), sessionToken: z.string().optional(), service: z.union([z.literal("sagemaker"), z.literal("lambda")]).default("sagemaker"), region: z.string().optional(), }); export async function endpointAws( input: z.input<typeof endpointAwsParametersSchema> ): Promise<Endpoint> { let createSignedFetcher; try { createSignedFetcher = (await import("aws-sigv4-fetch")).createSignedFetcher; } catch (e) { throw new Error("Failed to import aws-sigv4-fetch"); } const { url, accessKey, secretKey, sessionToken, model, region, service } = endpointAwsParametersSchema.parse(input); const signedFetch = createSignedFetcher({ service, region, credentials: accessKey && secretKey ? { accessKeyId: accessKey, secretAccessKey: secretKey, sessionToken } : undefined, }); return async ({ messages, preprompt, continueMessage, generateSettings }) => { const prompt = await buildPrompt({ messages, continueMessage, preprompt, model, }); return textGenerationStream( { parameters: { ...model.parameters, ...generateSettings, return_full_text: false }, model: url, inputs: prompt, }, { use_cache: false, fetch: signedFetch, } ); }; } export default endpointAws;
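// Example endpoint config (a sketch, not from the repo — field names follow
// endpointAwsParametersSchema above, and all values are placeholders):
//
// const exampleAwsEndpoint = {
//   type: "aws" as const,
//   weight: 1,
//   url: "https://abc123.execute-api.us-east-1.amazonaws.com/prod",
//   service: "sagemaker" as const,
//   region: "us-east-1",
//   model: someModelObject, // assumed to be supplied by the caller; the schema accepts any value
// };
// // Omitting accessKey/secretKey falls back to the default AWS identity resolution.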
chat-ui/src/lib/server/endpoints/aws/endpointAws.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/aws/endpointAws.ts", "repo_id": "chat-ui", "token_count": 695 }
60
import { randomUUID } from "$lib/utils/randomUuid"; import { timeout } from "$lib/utils/timeout"; import { logger } from "./logger"; type ExitHandler = () => void | Promise<void>; type ExitHandlerUnsubscribe = () => void; const listeners = new Map<string, ExitHandler>(); export function onExit(cb: ExitHandler): ExitHandlerUnsubscribe { const uuid = randomUUID(); listeners.set(uuid, cb); return () => { listeners.delete(uuid); }; } async function runExitHandler(handler: ExitHandler): Promise<void> { return timeout(Promise.resolve().then(handler), 30_000).catch((err) => { logger.error(err, "Exit handler failed to run"); }); } export function initExitHandler() { let signalCount = 0; const exitHandler = async () => { signalCount++; if (signalCount === 1) { logger.info("Received signal... Exiting"); await Promise.all(Array.from(listeners.values()).map(runExitHandler)); logger.info("All exit handlers ran... Waiting for svelte server to exit"); } if (signalCount === 3) { logger.warn("Received 3 signals... Exiting immediately"); process.exit(1); } }; process.on("SIGINT", exitHandler); process.on("SIGTERM", exitHandler); }
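// Usage sketch (illustrative only — `db` is a hypothetical resource, not part of this module):
//
// initExitHandler(); // install the SIGINT/SIGTERM listeners once at startup
// const unsubscribe = onExit(async () => {
//   await db.close(); // each handler gets up to 30s before it is timed out
// });
// // If the cleanup becomes unnecessary before shutdown:
// unsubscribe();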
chat-ui/src/lib/server/exitHandler.ts/0
{ "file_path": "chat-ui/src/lib/server/exitHandler.ts", "repo_id": "chat-ui", "token_count": 402 }
61
import type { ConfigTool } from "$lib/types/Tool";
import { ObjectId } from "mongodb";
import vm from "node:vm";

const calculator: ConfigTool = {
	_id: new ObjectId("00000000000000000000000C"),
	type: "config",
	description: "Calculate the result of a mathematical expression",
	color: "blue",
	icon: "code",
	displayName: "Calculator",
	name: "calculator",
	endpoint: null,
	inputs: [
		{
			name: "equation",
			type: "str",
			description:
				"A mathematical expression to be evaluated. The result of the expression will be returned.",
			paramType: "required",
		},
	],
	outputComponent: null,
	outputComponentIdx: null,
	showOutput: false,
	async *call({ equation }) {
		try {
			// Only evaluate the last line of the input, and strip every character that is not
			// part of a basic arithmetic expression (digits, + - * / . and parentheses).
			const blocks = String(equation).split("\n");
			const query = blocks[blocks.length - 1].replace(/[^-()\d/*+.]/g, "");

			// Evaluate the sanitized expression in an isolated VM context rather than with eval()
			return {
				outputs: [{ calculator: `${query} = ${vm.runInNewContext(query)}` }],
			};
		} catch (cause) {
			throw new Error("Invalid expression", { cause });
		}
	},
};

export default calculator;
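// Usage sketch (illustrative only — in the app the tool runner drives this generator;
// calling it directly as below is just for demonstration):
//
// const gen = calculator.call({ equation: "1 + 2 * 3" });
// const { value } = await gen.next();
// // value => { outputs: [{ calculator: "1+2*3 = 7" }] }  (spaces are stripped by the sanitizer)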
chat-ui/src/lib/server/tools/calculator.ts/0
{ "file_path": "chat-ui/src/lib/server/tools/calculator.ts", "repo_id": "chat-ui", "token_count": 360 }
62
import type { SerializedHTMLElement } from "../../scrape/types";
import { MarkdownElementType, type MarkdownElement } from "../types";

// --- Markdown Elements ---

/** Converts a markdown element to a string with formatting */
export function stringifyMarkdownElement(elem: MarkdownElement): string {
	const content = elem.content.trim();
	if (elem.type === MarkdownElementType.Header) return `${"#".repeat(elem.level)} ${content}\n\n`;
	if (elem.type === MarkdownElementType.BlockQuote) {
		return `${"> ".repeat(elem.depth)}${content}\n\n`;
	}
	if (elem.type === MarkdownElementType.CodeBlock) return `\`\`\`\n${content}\n\`\`\`\n\n`;

	if (elem.type === MarkdownElementType.UnorderedListItem) return `- ${content}\n`;
	if (elem.type === MarkdownElementType.OrderedListItem) {
		const siblings = elem.parent?.children ?? [elem];
		const currentIndex = siblings.indexOf(elem);
		// Number the item by its position within the current run of ordered list items:
		// find the last preceding sibling that is NOT an ordered list item and count from there.
		const lastNonListIndex = siblings
			.slice(0, currentIndex)
			.findLastIndex((child) => child.type !== MarkdownElementType.OrderedListItem);
		const order = currentIndex - lastNonListIndex;
		return `${order}. ${content}\n`;
	}

	return `${content}\n\n`;
}

/** Converts a tree of markdown elements to a string with formatting */
export function stringifyMarkdownElementTree(elem: MarkdownElement): string {
	const stringified = stringifyMarkdownElement(elem);
	if (!("children" in elem)) return stringified;
	return stringified + elem.children.map(stringifyMarkdownElementTree).join("");
}

// ----- HTML Elements -----

/** Ignores all non-inline tag types and grabs their text. Converts inline tags to markdown */
export function stringifyHTMLElements(elems: (SerializedHTMLElement | string)[]): string {
	return elems.map(stringifyHTMLElement).join("").trim();
}

/** Ignores all non-inline tag types and grabs their text. Converts inline tags to markdown */
export function stringifyHTMLElement(elem: SerializedHTMLElement | string): string {
	if (typeof elem === "string") return elem;
	if (elem.tagName === "br") return "\n";

	const content = elem.content.map(stringifyHTMLElement).join("");
	if (content.length === 0) return content;

	if (elem.tagName === "strong" || elem.tagName === "b") return `**${content}**`;
	if (elem.tagName === "em" || elem.tagName === "i") return `*${content}*`;
	if (elem.tagName === "s" || elem.tagName === "strike") return `~~${content}~~`;

	if (elem.tagName === "code" || elem.tagName === "var" || elem.tagName === "tt") {
		return `\`${content}\``;
	}

	if (elem.tagName === "sup") return `<sup>${content}</sup>`;
	if (elem.tagName === "sub") return `<sub>${content}</sub>`;

	if (elem.tagName === "a" && content.trim().length > 0) {
		const href = elem.attributes.href;
		if (!href) return content;
		return `[${content}](${href})`;
	}

	return content;
}

/** Grabs all text content directly, ignoring HTML tags */
export function stringifyHTMLElementsUnformatted(
	elems: (SerializedHTMLElement | string)[]
): string {
	return elems.map(stringifyHTMLElementUnformatted).join("");
}

/** Grabs all text content directly, ignoring HTML tags */
function stringifyHTMLElementUnformatted(elem: SerializedHTMLElement | string): string {
	if (typeof elem === "string") return elem;
	return elem.content.map(stringifyHTMLElementUnformatted).join("");
}
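// Worked example (illustrative only — the element shape is simplified; real values come
// from the scraper's markdown tree):
//
// stringifyMarkdownElement({ type: MarkdownElementType.Header, level: 2, content: "Results" } as MarkdownElement);
// // => "## Results\n\n"
// // For ordered list items, the third consecutive item in a run renders as "3. <content>\n".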
chat-ui/src/lib/server/websearch/markdown/utils/stringify.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/utils/stringify.ts", "repo_id": "chat-ui", "token_count": 1149 }
63
import type { WebSearchSource } from "$lib/types/WebSearch"; import type { Message } from "$lib/types/Message"; import type { Assistant } from "$lib/types/Assistant"; import { getWebSearchProvider, searchWeb } from "./endpoints"; import { generateQuery } from "./generateQuery"; import { isURLStringLocal } from "$lib/server/isURLLocal"; import { isURL } from "$lib/utils/isUrl"; import z from "zod"; import JSON5 from "json5"; import { env } from "$env/dynamic/private"; import { makeGeneralUpdate } from "../update"; import type { MessageWebSearchUpdate } from "$lib/types/MessageUpdate"; const listSchema = z.array(z.string()).default([]); const allowList = listSchema.parse(JSON5.parse(env.WEBSEARCH_ALLOWLIST)); const blockList = listSchema.parse(JSON5.parse(env.WEBSEARCH_BLOCKLIST)); export async function* search( messages: Message[], ragSettings?: Assistant["rag"], query?: string ): AsyncGenerator< MessageWebSearchUpdate, { searchQuery: string; pages: WebSearchSource[] }, undefined > { if (ragSettings && ragSettings?.allowedLinks.length > 0) { yield makeGeneralUpdate({ message: "Using links specified in Assistant" }); return { searchQuery: "", pages: await directLinksToSource(ragSettings.allowedLinks).then(filterByBlockList), }; } const searchQuery = query ?? (await generateQuery(messages)); yield makeGeneralUpdate({ message: `Searching ${getWebSearchProvider()}`, args: [searchQuery] }); // handle the global and (optional) rag lists if (ragSettings && ragSettings?.allowedDomains.length > 0) { yield makeGeneralUpdate({ message: "Filtering on specified domains" }); } const filters = buildQueryFromSiteFilters( [...(ragSettings?.allowedDomains ?? []), ...allowList], blockList ); const searchQueryWithFilters = `${filters} ${searchQuery}`; const searchResults = await searchWeb(searchQueryWithFilters).then(filterByBlockList); return { searchQuery: searchQueryWithFilters, pages: searchResults, }; } // ---------- // Utils function filterByBlockList(results: WebSearchSource[]): WebSearchSource[] { return results.filter((result) => !blockList.some((blocked) => result.link.includes(blocked))); } function buildQueryFromSiteFilters(allow: string[], block: string[]) { return ( allow.map((item) => "site:" + item).join(" OR ") + " " + block.map((item) => "-site:" + item).join(" ") ); } async function directLinksToSource(links: string[]): Promise<WebSearchSource[]> { if (env.ENABLE_LOCAL_FETCH !== "true") { const localLinks = await Promise.all(links.map(isURLStringLocal)); links = links.filter((_, index) => !localLinks[index]); } return links.filter(isURL).map((link) => ({ link, title: "", text: [""], })); }
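// Worked example (illustrative only):
// buildQueryFromSiteFilters(["example.com", "docs.example.com"], ["spam.com"])
// // => "site:example.com OR site:docs.example.com -site:spam.com"
// // The result is prepended to the generated search query before querying the provider.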
chat-ui/src/lib/server/websearch/search/search.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/search.ts", "repo_id": "chat-ui", "token_count": 873 }
64
import type { Timestamps } from "./Timestamps"; export interface ConversationStats extends Timestamps { date: { at: Date; span: "day" | "week" | "month"; field: "updatedAt" | "createdAt"; }; type: "conversation" | "message"; /** _id => number of conversations/messages in the month */ distinct: "sessionId" | "userId" | "userOrSessionId" | "_id"; count: number; }
chat-ui/src/lib/types/ConversationStats.ts/0
{ "file_path": "chat-ui/src/lib/types/ConversationStats.ts", "repo_id": "chat-ui", "token_count": 134 }
65
import type { ObjectId } from "mongodb"; import type { Timestamps } from "./Timestamps"; export interface User extends Timestamps { _id: ObjectId; username?: string; name: string; email?: string; avatarUrl: string | undefined; hfUserId: string; isAdmin?: boolean; isEarlyAccess?: boolean; }
chat-ui/src/lib/types/User.ts/0
{ "file_path": "chat-ui/src/lib/types/User.ts", "repo_id": "chat-ui", "token_count": 100 }
66
type Gen<T, TReturn> = AsyncGenerator<T, TReturn, undefined>; type GenPromiseMap<T, TReturn> = Map< Gen<T, TReturn>, Promise<{ gen: Gen<T, TReturn> } & IteratorResult<T, TReturn>> >; /** Merges multiple async generators into a single async generator that yields values from all of them in parallel. */ export async function* mergeAsyncGenerators<T, TReturn>( generators: Gen<T, TReturn>[] ): Gen<T, TReturn[]> { const promises: GenPromiseMap<T, TReturn> = new Map(); const results: Map<Gen<T, TReturn>, TReturn> = new Map(); for (const gen of generators) { promises.set( gen, gen.next().then((result) => ({ gen, ...result })) ); } while (promises.size) { const { gen, value, done } = await Promise.race(promises.values()); if (done) { results.set(gen, value as TReturn); promises.delete(gen); } else { promises.set( gen, gen.next().then((result) => ({ gen, ...result })) ); yield value as T; } } const orderedResults = generators.map((gen) => results.get(gen) as TReturn); return orderedResults; }
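// Usage sketch (illustrative only):
//
// async function* letters() { yield "a"; yield "b"; return "letters done"; }
// async function* numbers() { yield 1; yield 2; return "numbers done"; }
//
// for await (const value of mergeAsyncGenerators<string | number, string>([letters(), numbers()])) {
//   console.log(value); // values arrive as each generator resolves, interleaved
// }
// // Note: the final return values ("letters done", "numbers done") are only observable
// // by driving the merged generator manually with .next() rather than for-await.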
chat-ui/src/lib/utils/mergeAsyncGenerators.ts/0
{ "file_path": "chat-ui/src/lib/utils/mergeAsyncGenerators.ts", "repo_id": "chat-ui", "token_count": 407 }
67
import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import { v4 } from "uuid"; export function addSibling( conv: Pick<Conversation, "messages" | "rootMessageId">, message: Omit<Message, "id">, siblingId: Message["id"] ): Message["id"] { if (conv.messages.length === 0) { throw new Error("Cannot add a sibling to an empty conversation"); } if (!conv.rootMessageId) { throw new Error("Cannot add a sibling to a legacy conversation"); } const sibling = conv.messages.find((m) => m.id === siblingId); if (!sibling) { throw new Error("The sibling message doesn't exist"); } if (!sibling.ancestors || sibling.ancestors?.length === 0) { throw new Error("The sibling message is the root message, therefore we can't add a sibling"); } const messageId = v4(); conv.messages.push({ ...message, id: messageId, ancestors: sibling.ancestors, children: [], }); const nearestAncestorId = sibling.ancestors[sibling.ancestors.length - 1]; const nearestAncestor = conv.messages.find((m) => m.id === nearestAncestorId); if (nearestAncestor) { if (nearestAncestor.children) { nearestAncestor.children.push(messageId); } else nearestAncestor.children = [messageId]; } return messageId; }
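// Usage sketch (illustrative only — `conv` and `siblingMsgId` are hypothetical values):
//
// const newId = addSibling(conv, { from: "assistant", content: "" }, siblingMsgId);
// // The new message copies the sibling's ancestors, and the nearest ancestor's
// // `children` array is updated to include `newId`.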
chat-ui/src/lib/utils/tree/addSibling.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/addSibling.ts", "repo_id": "chat-ui", "token_count": 439 }
68
import { collections } from "$lib/server/database";
import { authCondition } from "$lib/server/auth";
import { z } from "zod";
import { models } from "$lib/server/models";
import { ObjectId } from "mongodb";

export async function GET({ locals, params }) {
	const id = z.string().parse(params.id);
	const convId = new ObjectId(id);

	if (locals.user?._id || locals.sessionId) {
		const conv = await collections.conversations.findOne({
			_id: convId,
			...authCondition(locals),
		});

		if (conv) {
			const res = {
				id: conv._id,
				title: conv.title,
				updatedAt: conv.updatedAt,
				modelId: conv.model,
				assistantId: conv.assistantId,
				messages: conv.messages.map((message) => ({
					content: message.content,
					from: message.from,
					id: message.id,
					createdAt: message.createdAt,
					updatedAt: message.updatedAt,
					webSearch: message.webSearch,
					files: message.files,
					updates: message.updates,
				})),
				modelTools: models.find((m) => m.id === conv.model)?.tools ?? false,
			};
			return Response.json(res);
		} else {
			return Response.json({ message: "Conversation not found" }, { status: 404 });
		}
	} else {
		return Response.json({ message: "Must have session cookie" }, { status: 401 });
	}
}
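// Usage sketch (illustrative only — a client-side fetch with the session cookie attached;
// the path prefix depends on how the app is mounted):
//
// const res = await fetch(`/api/conversation/${conversationId}`);
// if (res.ok) {
//   const conv = await res.json(); // { id, title, messages, modelId, assistantId, ... }
// } else if (res.status === 404) {
//   // conversation not found (or not owned by this session)
// }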
chat-ui/src/routes/api/conversation/[id]/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/conversation/[id]/+server.ts", "repo_id": "chat-ui", "token_count": 478 }
69
<script lang="ts"> import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { pendingMessage } from "$lib/stores/pendingMessage"; import { isAborted } from "$lib/stores/isAborted"; import { onMount } from "svelte"; import { page } from "$app/stores"; import { goto, invalidateAll } from "$app/navigation"; import { base } from "$app/paths"; import { shareConversation } from "$lib/shareConversation"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { findCurrentModel } from "$lib/utils/models"; import { webSearchParameters } from "$lib/stores/webSearchParameters"; import type { Message } from "$lib/types/Message"; import { MessageUpdateStatus, MessageUpdateType, type MessageUpdate, } from "$lib/types/MessageUpdate"; import titleUpdate from "$lib/stores/titleUpdate"; import file2base64 from "$lib/utils/file2base64"; import { addChildren } from "$lib/utils/tree/addChildren"; import { addSibling } from "$lib/utils/tree/addSibling"; import { fetchMessageUpdates } from "$lib/utils/messageUpdates"; import { createConvTreeStore } from "$lib/stores/convTree"; import type { v4 } from "uuid"; import { useSettingsStore } from "$lib/stores/settings.js"; export let data; $: ({ messages } = data); let loading = false; let pending = false; let files: File[] = []; async function convFromShared() { try { loading = true; const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ fromShare: $page.params.id, model: data.model, }), }); if (!res.ok) { error.set(await res.text()); console.error("Error while creating conversation: " + (await res.text())); return; } const { conversationId } = await res.json(); return conversationId; } catch (err) { error.set(ERROR_MESSAGES.default); console.error(String(err)); throw err; } } // this function is used to send new message to the backends async function writeMessage({ prompt, messageId = $convTreeStore.leaf ?? undefined, isRetry = false, isContinue = false, }: { prompt?: string; messageId?: ReturnType<typeof v4>; isRetry?: boolean; isContinue?: boolean; }): Promise<void> { try { $isAborted = false; loading = true; pending = true; const base64Files = await Promise.all( (files ?? []).map((file) => file2base64(file).then((value) => ({ type: "base64" as const, value, mime: file.type, name: file.name, })) ) ); let messageToWriteToId: Message["id"] | undefined = undefined; // used for building the prompt, subtree of the conversation that goes from the latest message to the root if (isContinue && messageId) { if ((messages.find((msg) => msg.id === messageId)?.children?.length ?? 
0) > 0) { $error = "Can only continue the last message"; } else { messageToWriteToId = messageId; } } else if (isRetry && messageId) { // two cases, if we're retrying a user message with a newPrompt set, // it means we're editing a user message // if we're retrying on an assistant message, newPrompt cannot be set // it means we're retrying the last assistant message for a new answer const messageToRetry = messages.find((message) => message.id === messageId); if (!messageToRetry) { $error = "Message not found"; } if (messageToRetry?.from === "user" && prompt) { // add a sibling to this message from the user, with the alternative prompt // add a children to that sibling, where we can write to const newUserMessageId = addSibling( { messages, rootMessageId: data.rootMessageId, }, { from: "user", content: prompt, files: messageToRetry.files, }, messageId ); messageToWriteToId = addChildren( { messages, rootMessageId: data.rootMessageId, }, { from: "assistant", content: "" }, newUserMessageId ); } else if (messageToRetry?.from === "assistant") { // we're retrying an assistant message, to generate a new answer // just add a sibling to the assistant answer where we can write to messageToWriteToId = addSibling( { messages, rootMessageId: data.rootMessageId, }, { from: "assistant", content: "" }, messageId ); } } else { // just a normal linear conversation, so we add the user message // and the blank assistant message back to back const newUserMessageId = addChildren( { messages, rootMessageId: data.rootMessageId, }, { from: "user", content: prompt ?? "", files: base64Files, createdAt: new Date(), updatedAt: new Date(), }, messageId ); if (!data.rootMessageId) { data.rootMessageId = newUserMessageId; } messageToWriteToId = addChildren( { messages, rootMessageId: data.rootMessageId, }, { from: "assistant", content: "", createdAt: new Date(), updatedAt: new Date(), }, newUserMessageId ); } messages = [...messages]; const userMessage = messages.find((message) => message.id === messageId); const messageToWriteTo = messages.find((message) => message.id === messageToWriteToId); if (!messageToWriteTo) { throw new Error("Message to write to not found"); } // disable websearch if assistant is present const hasAssistant = !!$page.data.assistant; const messageUpdatesAbortController = new AbortController(); const messageUpdatesIterator = await fetchMessageUpdates( $page.params.id, { base, inputs: prompt, messageId, isRetry, isContinue, webSearch: !hasAssistant && $webSearchParameters.useSearch, tools: $settings.tools, // preference for tools files: isRetry ? userMessage?.files : base64Files, }, messageUpdatesAbortController.signal ).catch((err) => { error.set(err.message); }); if (messageUpdatesIterator === undefined) return; files = []; const messageUpdates: MessageUpdate[] = []; for await (const update of messageUpdatesIterator) { if ($isAborted) { messageUpdatesAbortController.abort(); return; } // Remove null characters added due to remote keylogging prevention // See server code for more details if (update.type === MessageUpdateType.Stream) { update.token = update.token.replaceAll("\0", ""); } messageUpdates.push(update); if (update.type === MessageUpdateType.Stream && !$settings.disableStream) { messageToWriteTo.content += update.token; pending = false; messages = [...messages]; } else if ( update.type === MessageUpdateType.WebSearch || update.type === MessageUpdateType.Tool ) { messageToWriteTo.updates = [...(messageToWriteTo.updates ?? 
[]), update]; messages = [...messages]; } else if ( update.type === MessageUpdateType.Status && update.status === MessageUpdateStatus.Error ) { $error = update.message ?? "An error has occurred"; } else if (update.type === MessageUpdateType.Title) { const convInData = data.conversations.find(({ id }) => id === $page.params.id); if (convInData) { convInData.title = update.title; $titleUpdate = { title: update.title, convId: $page.params.id, }; } } else if (update.type === MessageUpdateType.File) { messageToWriteTo.files = [ ...(messageToWriteTo.files ?? []), { type: "hash", value: update.sha, mime: update.mime, name: update.name }, ]; messages = [...messages]; } } messageToWriteTo.updates = messageUpdates; } catch (err) { if (err instanceof Error && err.message.includes("overloaded")) { $error = "Too much traffic, please try again."; } else if (err instanceof Error && err.message.includes("429")) { $error = ERROR_MESSAGES.rateLimited; } else if (err instanceof Error) { $error = err.message; } else { $error = ERROR_MESSAGES.default; } console.error(err); } finally { loading = false; pending = false; await invalidateAll(); } } async function voteMessage(score: Message["score"], messageId: string) { let conversationId = $page.params.id; let oldScore: Message["score"] | undefined; // optimistic update to avoid waiting for the server messages = messages.map((message) => { if (message.id === messageId) { oldScore = message.score; return { ...message, score }; } return message; }); try { await fetch(`${base}/conversation/${conversationId}/message/${messageId}/vote`, { method: "POST", body: JSON.stringify({ score }), }); } catch { // revert score on any error messages = messages.map((message) => { return message.id !== messageId ? message : { ...message, score: oldScore }; }); } } onMount(async () => { // only used in case of creating new conversations (from the parent POST endpoint) if ($pendingMessage) { files = $pendingMessage.files; await writeMessage({ prompt: $pendingMessage.content }); $pendingMessage = undefined; } }); async function onMessage(event: CustomEvent<string>) { if (!data.shared) { await writeMessage({ prompt: event.detail }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then(async () => await writeMessage({ prompt: event.detail })) .finally(() => (loading = false)); } } async function onRetry(event: CustomEvent<{ id: Message["id"]; content?: string }>) { if (!data.shared) { await writeMessage({ prompt: event.detail.content, messageId: event.detail.id, isRetry: true, }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then( async () => await writeMessage({ prompt: event.detail.content, messageId: event.detail.id, isRetry: true, }) ) .finally(() => (loading = false)); } } async function onContinue(event: CustomEvent<{ id: Message["id"] }>) { if (!data.shared) { writeMessage({ messageId: event.detail.id, isContinue: true }); } else { await convFromShared() .then(async (convId) => { await goto(`${base}/conversation/${convId}`, { invalidateAll: true }); }) .then( async () => await writeMessage({ messageId: event.detail.id, isContinue: true, }) ) .finally(() => (loading = false)); } } $: $page.params.id, (($isAborted = true), (loading = false), ($convTreeStore.editing = null)); $: title = data.conversations.find((conv) => conv.id === $page.params.id)?.title ?? 
data.title; const convTreeStore = createConvTreeStore(); const settings = useSettingsStore(); </script> <svelte:head> <title>{title}</title> <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.16.8/dist/katex.min.css" integrity="sha384-GvrOXuhMATgEsSwCs4smul74iXGOixntILdUW9XmUC6+HX0sLNAK3q71HotJqlAn" crossorigin="anonymous" /> </svelte:head> <ChatWindow {loading} {pending} {messages} shared={data.shared} preprompt={data.preprompt} bind:files on:message={onMessage} on:retry={onRetry} on:continue={onContinue} on:vote={(event) => voteMessage(event.detail.score, event.detail.id)} on:share={() => shareConversation($page.params.id, data.title)} on:stop={() => (($isAborted = true), (loading = false))} models={data.models} currentModel={findCurrentModel([...data.models, ...data.oldModels], data.model)} assistant={data.assistant} />
chat-ui/src/routes/conversation/[id]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/conversation/[id]/+page.svelte", "repo_id": "chat-ui", "token_count": 4710 }
70
<script lang="ts"> import { page } from "$app/stores"; import { base } from "$app/paths"; import { goto } from "$app/navigation"; import { onMount } from "svelte"; import { env as envPublic } from "$env/dynamic/public"; import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { findCurrentModel } from "$lib/utils/models"; import { useSettingsStore } from "$lib/stores/settings"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { pendingMessage } from "$lib/stores/pendingMessage"; export let data; let loading = false; let files: File[] = []; const settings = useSettingsStore(); const modelId = $page.params.model; async function createConversation(message: string) { try { loading = true; // check if $settings.activeModel is a valid model // else check if it's an assistant, and use that model // else use the first model const validModels = data.models.map((model) => model.id); let model; if (validModels.includes($settings.activeModel)) { model = $settings.activeModel; } else { if (validModels.includes(data.assistant?.modelId)) { model = data.assistant?.modelId; } else { model = data.models[0].id; } } const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ model, preprompt: $settings.customPrompts[$settings.activeModel], }), }); if (!res.ok) { error.set("Error while creating conversation, try again."); console.error("Error while creating conversation: " + (await res.text())); return; } const { conversationId } = await res.json(); // Ugly hack to use a store as temp storage, feel free to improve ^^ pendingMessage.set({ content: message, files, }); // invalidateAll to update list of conversations await goto(`${base}/conversation/${conversationId}`, { invalidateAll: true }); } catch (err) { error.set(ERROR_MESSAGES.default); console.error(err); } finally { loading = false; } } onMount(async () => { settings.instantSet({ activeModel: modelId, }); const query = $page.url.searchParams.get("q"); if (query) createConversation(query); }); </script> <svelte:head> <meta property="og:title" content={modelId + " - " + envPublic.PUBLIC_APP_NAME} /> <meta property="og:type" content="link" /> <meta property="og:description" content={`Use ${modelId} with ${envPublic.PUBLIC_APP_NAME}`} /> <meta property="og:image" content="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/models/{modelId}/thumbnail.png" /> <meta property="og:url" content={$page.url.href} /> <meta name="twitter:card" content="summary_large_image" /> </svelte:head> <ChatWindow on:message={(ev) => createConversation(ev.detail)} {loading} currentModel={findCurrentModel([...data.models, ...data.oldModels], modelId)} models={data.models} bind:files />
chat-ui/src/routes/models/[...model]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/models/[...model]/+page.svelte", "repo_id": "chat-ui", "token_count": 1103 }
71
<script lang="ts"> import type { PageData, ActionData } from "./$types"; import { page } from "$app/stores"; import AssistantSettings from "$lib/components/AssistantSettings.svelte"; export let data: PageData; export let form: ActionData; let assistant = data.assistants.find((el) => el._id.toString() === $page.params.assistantId); </script> <AssistantSettings bind:form {assistant} models={data.models} />
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/edit/+page@settings.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/edit/+page@settings.svelte", "repo_id": "chat-ui", "token_count": 129 }
72
import { authCondition, requiresUser } from "$lib/server/auth.js"; import { collections } from "$lib/server/database.js"; import { editableToolSchema } from "$lib/server/tools/index.js"; import { usageLimits } from "$lib/server/usageLimits.js"; import { generateSearchTokens } from "$lib/utils/searchTokens.js"; import { error, fail } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; export const actions = { default: async ({ request, locals }) => { // XXX: feature_flag_tools if (!locals.user?.isEarlyAccess) { error(403, "You need to be an early access user to create tools"); } const body = await request.formData(); const toolStringified = body.get("tool"); if (!toolStringified || typeof toolStringified !== "string") { error(400, "Tool is required"); } const parse = editableToolSchema.safeParse(JSON.parse(toolStringified)); if (!parse.success) { // Loop through the errors array and create a custom errors array const errors = parse.error.errors.map((error) => { return { field: error.path[0], message: error.message, }; }); return fail(400, { error: true, errors }); } // can only create tools when logged in, IF login is setup if (!locals.user && requiresUser) { const errors = [{ field: "description", message: "Must be logged in. Unauthorized" }]; return fail(400, { error: true, errors }); } const toolCounts = await collections.tools.countDocuments({ createdById: locals.user?._id }); if (usageLimits?.tools && toolCounts > usageLimits.tools) { const errors = [ { field: "description", message: "You have reached the maximum number of tools. Delete some to continue.", }, ]; return fail(400, { error: true, errors }); } if (!locals.user || !authCondition(locals)) { error(401, "Unauthorized"); } const { insertedId } = await collections.tools.insertOne({ ...parse.data, type: "community" as const, _id: new ObjectId(), createdById: locals.user?._id, createdByName: locals.user?.username, createdAt: new Date(), updatedAt: new Date(), last24HoursUseCount: 0, useCount: 0, // XXX: feature_flag_tools // since this is scoped to internal team members only, we can assume that they should all be public featured: true, searchTokens: generateSearchTokens(parse.data.displayName), }); return { toolId: insertedId.toString() }; }, };
chat-ui/src/routes/tools/new/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/tools/new/+page.server.ts", "repo_id": "chat-ui", "token_count": 835 }
73
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": { "type": "line" }, "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "quantitative", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "quantitative", "title": "<DVC_METRIC_Y_LABEL>", "scale": { "zero": false } }, "color": { "field": "rev", "type": "nominal" } }, "transform": [ { "loess": "<DVC_METRIC_Y>", "on": "<DVC_METRIC_X>", "groupby": [ "rev" ], "bandwidth": 0.3 } ] }
datasets/.dvc/plots/smooth.json/0
{ "file_path": "datasets/.dvc/plots/smooth.json", "repo_id": "datasets", "token_count": 569 }
74
# How to contribute to Datasets?
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg)](CODE_OF_CONDUCT.md)

Datasets is an open source project, so all contributions and suggestions are welcome.

You can contribute in many different ways: giving ideas, answering questions, reporting bugs, proposing enhancements,
improving the documentation, fixing bugs,...

Many thanks in advance to every contributor.

In order to facilitate healthy, constructive behavior in an open and inclusive community, we all respect and abide by
our [code of conduct](CODE_OF_CONDUCT.md).

## How to work on an open Issue?
You have the list of open Issues at: https://github.com/huggingface/datasets/issues

Some of them may have the label `help wanted`: that means that any contributor is welcome!

If you would like to work on any of the open Issues:

1. Make sure it is not already assigned to someone else. You have the assignee (if any) on the top of the right column of the Issue page.

2. You can self-assign it by commenting on the Issue page with the keyword: `#self-assign`.

3. Work on your self-assigned issue and eventually create a Pull Request.

## How to create a Pull Request?
If you want to add a dataset see specific instructions in the section [*How to add a dataset*](#how-to-add-a-dataset).

1. Fork the [repository](https://github.com/huggingface/datasets) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account.

2. Clone your fork to your local disk, and add the base repository as a remote:

    ```bash
    git clone git@github.com:<your Github handle>/datasets.git
    cd datasets
    git remote add upstream https://github.com/huggingface/datasets.git
    ```

3. Create a new branch to hold your development changes:

    ```bash
    git checkout -b a-descriptive-name-for-my-changes
    ```

    **Do not** work on the `main` branch.

4. Set up a development environment by running the following command in a virtual environment:

    ```bash
    pip install -e ".[dev]"
    ```

    (If datasets was already installed in the virtual environment, remove it with `pip uninstall datasets` before reinstalling it in editable mode with the `-e` flag.)

5. Develop the features on your branch.

6. Format your code: run `black` and `ruff` with the following command so that your newly added files look nice:

    ```bash
    make style
    ```

7. _(Optional)_ You can also use [`pre-commit`](https://pre-commit.com/) to format your code automatically each time you run `git commit`, instead of running `make style` manually.
To do this, install `pre-commit` via `pip install pre-commit` and then run `pre-commit install` in the project's root directory to set up the hooks.
Note that if any files were formatted by `pre-commit` hooks during committing, you have to run `git commit` again.

8. Once you're happy with your contribution, add your changed files and make a commit to record your changes locally:

    ```bash
    git add -u
    git commit
    ```

    It is a good idea to sync your copy of the code with the original repository regularly. This way you can quickly account for changes:

    ```bash
    git fetch upstream
    git rebase upstream/main
    ```

9. Once you are satisfied, push the changes to your fork repo using:

    ```bash
    git push -u origin a-descriptive-name-for-my-changes
    ```

    Go to the webpage of your fork on GitHub. Click on "Pull request" to send your changes to the project maintainers for review.
## How to add a dataset

You can share your dataset on https://huggingface.co/datasets directly using your account, see the documentation:

* [Create a dataset and upload files on the website](https://huggingface.co/docs/datasets/upload_dataset)
* [Advanced guide using the CLI](https://huggingface.co/docs/datasets/share)

## How to contribute to the dataset cards

Improving the documentation of datasets is an ever-increasing effort, and we invite users to contribute by sharing their insights with the community in the `README.md` dataset cards provided for each dataset.

If you see that a dataset card is missing information that you are in a position to provide (as an author of the dataset or as an experienced user), the best thing you can do is to open a Pull Request on the Hugging Face Hub. To do so, go to the "Files and versions" tab of the dataset page and edit the `README.md` file.

We provide:

* a [template](https://github.com/huggingface/datasets/blob/main/templates/README.md)
* a [guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) describing what information should go into each of the paragraphs
* and if you need inspiration, we recommend looking through a [completed example](https://huggingface.co/datasets/eli5/blob/main/README.md)

If you are a **dataset author**... you know what to do, it is your dataset after all ;) ! We would especially appreciate it if you could help us fill in information about the process of creating the dataset, and take a moment to reflect on its social impact and possible limitations if you haven't already done so in the dataset paper or in another data statement.

If you are a **user of a dataset**, the main source of information should be the dataset paper if it is available: we recommend pulling information from there into the relevant paragraphs of the template. We also eagerly welcome discussions on the [Considerations for Using the Data](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md#considerations-for-using-the-data) based on existing scholarship or personal experience that would benefit the whole community.

Finally, if you want more information on the how and why of dataset cards, we strongly recommend reading the foundational works [Datasheets for Datasets](https://arxiv.org/abs/1803.09010) and [Data Statements for NLP](https://www.aclweb.org/anthology/Q18-1041/).

Thank you for your contribution!

## Code of conduct

This project adheres to the HuggingFace [code of conduct](CODE_OF_CONDUCT.md). By participating, you are expected to abide by this code.
datasets/CONTRIBUTING.md/0
{ "file_path": "datasets/CONTRIBUTING.md", "repo_id": "datasets", "token_count": 1715 }
75
# Process audio data

This guide shows specific methods for processing audio datasets. Learn how to:

- Resample the sampling rate.
- Use [`~Dataset.map`] with audio datasets.

For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>.

## Cast

The [`~Dataset.cast_column`] function is used to cast a column to another feature to be decoded. When you use this function with the [`Audio`] feature, you can resample the sampling rate:

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train")
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000))
```

Audio files are decoded and resampled on-the-fly, so the next time you access an example, the audio file is resampled to 16kHz:

```py
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
         3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 16000}
```

<div class="flex justify-center">
    <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/resample.gif"/>
    <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/resample-dark.gif"/>
</div>

## Map

The [`~Dataset.map`] function helps preprocess your entire dataset at once. Depending on the type of model you're working with, you'll need to either load a [feature extractor](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoFeatureExtractor) or a [processor](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoProcessor).

- For pretrained speech recognition models, load a feature extractor and tokenizer and combine them in a `processor`:

    ```py
    >>> from transformers import Wav2Vec2CTCTokenizer, AutoFeatureExtractor, Wav2Vec2Processor

    >>> model_checkpoint = "facebook/wav2vec2-large-xlsr-53"
    # after defining a vocab.json file you can instantiate a tokenizer object:
    >>> tokenizer = Wav2Vec2CTCTokenizer("./vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|")
    >>> feature_extractor = AutoFeatureExtractor.from_pretrained(model_checkpoint)
    >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    ```

- For fine-tuned speech recognition models, you only need to load a `processor`:

    ```py
    >>> from transformers import AutoProcessor

    >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
    ```

When you use [`~Dataset.map`] with your preprocessing function, include the `audio` column to ensure you're actually resampling the audio data:

```py
>>> def prepare_dataset(batch):
...     audio = batch["audio"]
...     batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
...     batch["input_length"] = len(batch["input_values"])
...     with processor.as_target_processor():
...         batch["labels"] = processor(batch["sentence"]).input_ids
...     return batch

>>> dataset = dataset.map(prepare_dataset, remove_columns=dataset.column_names)
```
datasets/docs/source/audio_process.mdx/0
{ "file_path": "datasets/docs/source/audio_process.mdx", "repo_id": "datasets", "token_count": 1186 }
76
# Datasets <img class="float-left !m-0 !border-0 !dark:border-0 !shadow-none !max-w-lg w-[150px]" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasets_logo.png"/> 🤗 Datasets is a library for easily accessing and sharing datasets for Audio, Computer Vision, and Natural Language Processing (NLP) tasks. Load a dataset in a single line of code, and use our powerful data processing methods to quickly get your dataset ready for training in a deep learning model. Backed by the Apache Arrow format, process large datasets with zero-copy reads without any memory constraints for optimal speed and efficiency. We also feature a deep integration with the [Hugging Face Hub](https://huggingface.co/datasets), allowing you to easily load and share a dataset with the wider machine learning community. Find your dataset today on the [Hugging Face Hub](https://huggingface.co/datasets), and take an in-depth look inside of it with the live viewer. <div class="mt-10"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./tutorial" ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Tutorials</div> <p class="text-gray-700">Learn the basics and become familiar with loading, accessing, and processing a dataset. Start here if you are using 🤗 Datasets for the first time!</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./how_to" ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div> <p class="text-gray-700">Practical guides to help you achieve a specific goal. Take a look at these guides to learn how to use 🤗 Datasets to solve real-world problems.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./about_arrow" ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div> <p class="text-gray-700">High-level explanations for building a better understanding about important topics such as the underlying data format, the cache, and how datasets are generated.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/main_classes" ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div> <p class="text-gray-700">Technical descriptions of how 🤗 Datasets classes and methods work.</p> </a> </div> </div>
datasets/docs/source/index.mdx/0
{ "file_path": "datasets/docs/source/index.mdx", "repo_id": "datasets", "token_count": 1014 }
77
# Share a dataset using the CLI

At Hugging Face, we are on a mission to democratize good Machine Learning and we believe in the value of open source. That's why we designed 🤗 Datasets so that anyone can share a dataset with the greater ML community. There are currently thousands of datasets in over 100 languages in the Hugging Face Hub, and the Hugging Face team always welcomes new contributions!

Dataset repositories offer features such as:

- Free dataset hosting
- Dataset versioning
- Commit history and diffs
- Metadata for discoverability
- Dataset cards for documentation, licensing, limitations, etc.
- [Dataset Viewer](../hub/datasets-viewer)

This guide will show you how to share a dataset folder or repository that can be easily accessed by anyone.

<a id='upload_dataset_repo'></a>

## Add a dataset

You can share your dataset with the community with a dataset repository on the Hugging Face Hub.
It can also be a private dataset if you want to control who has access to it.

In a dataset repository, you can host all your data files and [configure your dataset](./repository_structure#define-your-splits-in-yaml) to define which file goes to which split.
The following formats are supported: CSV, TSV, JSON, JSON lines, text, Parquet, Arrow, SQLite, WebDataset.
Many kinds of compressed file types are also supported: GZ, BZ2, LZ4, LZMA or ZSTD.
For example, your dataset can be made of `.json.gz` files.

On the other hand, if your dataset is not in a supported format or if you want more control over how your dataset is loaded, you can write your own dataset script.

Note that some features are not available for datasets defined using a loading script, such as the Dataset Viewer. Users also have to pass `trust_remote_code=True` to load the dataset. It is generally recommended for datasets to not rely on a loading script if possible, to benefit from all the Hub's features.

When loading a dataset from the Hub, all the files in the supported formats are loaded, following the [repository structure](./repository_structure).
However if there's a dataset script, it is downloaded and executed to download and prepare the dataset instead.

For more information on how to load a dataset from the Hub, take a look at the [load a dataset from the Hub](./load_hub) tutorial.

### Create the repository

Sharing a community dataset will require you to create an account on [hf.co](https://huggingface.co/join) if you don't have one yet.
You can directly create a [new dataset repository](https://huggingface.co/login?next=%2Fnew-dataset) from your account on the Hugging Face Hub, but this guide will show you how to upload a dataset from the terminal.

1. Make sure you are in the virtual environment where you installed Datasets, and run the following command:

```
huggingface-cli login
```

2. Login using your Hugging Face Hub credentials, and create a new dataset repository:

```
huggingface-cli repo create my-cool-dataset --type dataset
```

Add the `--organization` flag to create a repository under a specific organization:

```
huggingface-cli repo create my-cool-dataset --type dataset --organization your-org-name
```

## Prepare your files

Check your directory to ensure the only files you're uploading are:

- The data files of the dataset
- The dataset card `README.md`
- (optional) `your_dataset_name.py` is your dataset loading script (optional if your data files are already in the supported formats csv/jsonl/json/parquet/txt). To create a dataset script, see the [dataset script](dataset_script) page.
## huggingface-cli upload

Use the `huggingface-cli upload` command to upload files to the Hub directly. Internally, it uses the same [`upload_file`] and [`upload_folder`] helpers described in the [Upload guide](../huggingface_hub/guides/upload). In the examples below, we will walk through the most common use cases. For a full list of available options, you can run:

```bash
>>> huggingface-cli upload --help
```

For more general information about `huggingface-cli` you can check the [CLI guide](../huggingface_hub/guides/cli).

### Upload an entire folder

The default usage for this command is:

```bash
# Usage:  huggingface-cli upload [dataset_repo_id] [local_path] [path_in_repo] --repo-type dataset
```

To upload the current directory at the root of the repo, use:

```bash
>>> huggingface-cli upload my-cool-dataset . . --repo-type dataset
https://huggingface.co/datasets/Wauplin/my-cool-dataset/tree/main/
```

<Tip>

If the repo doesn't exist yet, it will be created automatically.

</Tip>

You can also upload a specific folder:

```bash
>>> huggingface-cli upload my-cool-dataset ./data . --repo-type dataset
https://huggingface.co/datasets/Wauplin/my-cool-dataset/tree/main/
```

Finally, you can upload a folder to a specific destination on the repo:

```bash
>>> huggingface-cli upload my-cool-dataset ./path/to/curated/data /data/train --repo-type dataset
https://huggingface.co/datasets/Wauplin/my-cool-dataset/tree/main/data/train
```

### Upload a single file

You can also upload a single file by setting `local_path` to point to a file on your machine. If that's the case, `path_in_repo` is optional and will default to the name of your local file:

```bash
>>> huggingface-cli upload Wauplin/my-cool-dataset ./files/train.csv --repo-type dataset
https://huggingface.co/datasets/Wauplin/my-cool-dataset/blob/main/train.csv
```

If you want to upload a single file to a specific directory, set `path_in_repo` accordingly:

```bash
>>> huggingface-cli upload Wauplin/my-cool-dataset ./files/train.csv /data/train.csv --repo-type dataset
https://huggingface.co/datasets/Wauplin/my-cool-dataset/blob/main/data/train.csv
```

### Upload multiple files

To upload multiple files from a folder at once without uploading the entire folder, use the `--include` and `--exclude` patterns. It can also be combined with the `--delete` option to delete files on the repo while uploading new ones. In the example below, we sync the local dataset folder by deleting remote files and uploading all CSV files:

```bash
# Sync local dataset folder with Hub (upload new CSV files, delete removed files)
>>> huggingface-cli upload Wauplin/my-cool-dataset --repo-type dataset --include="/data/*.csv" --delete="*" --commit-message="Sync local dataset with Hub"
...
```

### Upload to an organization

To upload content to a repo owned by an organization instead of a personal repo, you must explicitly specify it in the `repo_id`:

```bash
>>> huggingface-cli upload MyCoolOrganization/my-cool-dataset . . --repo-type dataset
https://huggingface.co/datasets/MyCoolOrganization/my-cool-dataset/tree/main/
```

### Upload to a specific revision

By default, files are uploaded to the `main` branch.
### Upload to a specific revision

By default, files are uploaded to the `main` branch. If you want to upload files to another branch or reference, use the `--revision` option:

```bash
# Upload files to a PR
huggingface-cli upload bigcode/the-stack . . --repo-type dataset --revision refs/pr/104
...
```

**Note:** if `revision` does not exist and `--create-pr` is not set, a branch will be created automatically from the `main` branch.

### Upload and create a PR

If you don't have the permission to push to a repo, you must open a PR and let the authors know about the changes you want to make. This can be done by setting the `--create-pr` option:

```bash
# Create a PR and upload the files to it
>>> huggingface-cli upload bigcode/the-stack --repo-type dataset --revision refs/pr/104 --create-pr . .
https://huggingface.co/datasets/bigcode/the-stack/blob/refs%2Fpr%2F104/
```

### Upload at regular intervals

In some cases, you might want to push regular updates to a repo. For example, this is useful if your dataset is growing over time and you want to upload the data folder every 10 minutes. You can do this using the `--every` option:

```bash
# Upload new logs every 10 minutes
huggingface-cli upload my-cool-dynamic-dataset data/ --repo-type dataset --every=10
```

### Specify a commit message

Use the `--commit-message` and `--commit-description` options to set a custom message and description for your commit instead of the default ones:

```bash
>>> huggingface-cli upload Wauplin/my-cool-dataset ./data . --repo-type dataset --commit-message="Version 2" --commit-description="Train size: 4321. Check Dataset Viewer for more details."
...
https://huggingface.co/datasets/Wauplin/my-cool-dataset/tree/main
```

### Specify a token

To upload files, you must use a token. By default, the token saved locally (using `huggingface-cli login`) will be used. If you want to authenticate explicitly, use the `--token` option:

```bash
>>> huggingface-cli upload Wauplin/my-cool-dataset ./data . --repo-type dataset --token=hf_****
...
https://huggingface.co/datasets/Wauplin/my-cool-dataset/tree/main
```

### Quiet mode

By default, the `huggingface-cli upload` command is verbose. It prints details such as warning messages, information about the uploaded files, and progress bars. If you want to silence all of this, use the `--quiet` option. Only the last line (i.e. the URL to the uploaded files) is printed. This can prove useful if you want to pass the output to another command in a script.

```bash
>>> huggingface-cli upload Wauplin/my-cool-dataset ./data . --repo-type dataset --quiet
https://huggingface.co/datasets/Wauplin/my-cool-dataset/tree/main
```

## Enjoy!

Congratulations, your dataset has now been uploaded to the Hugging Face Hub where anyone can load it in a single line of code! 🥳

```py
from datasets import load_dataset

dataset = load_dataset("Wauplin/my-cool-dataset")
```

If your dataset is supported, it should also have a [Dataset Viewer](../hub/datasets-viewer) for everyone to explore the dataset content.

Finally, don't forget to enrich the dataset card to document your dataset and make it discoverable! Check out the [Create a dataset card](dataset_card) guide to learn more.
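As a last sanity check, you can verify from Python which files actually landed in the repository. This is a minimal sketch using `huggingface_hub`; `Wauplin/my-cool-dataset` is the example repo id used throughout this guide:

```py
from huggingface_hub import HfApi

api = HfApi()

# List every file now stored in the dataset repository
files = api.list_repo_files("Wauplin/my-cool-dataset", repo_type="dataset")
print(files)  # e.g. ['README.md', 'data/train.csv', ...]
```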
```py
# datasets/src/datasets/download/__init__.py
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
```