hf_public_repos/peft/tests/testing_common.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import os
import pickle
import re
import tempfile
from collections import OrderedDict
from dataclasses import replace

import torch
import yaml
from diffusers import StableDiffusionPipeline

from peft import (
    AdaLoraConfig,
    IA3Config,
    LoraConfig,
    PeftModel,
    PeftType,
    PrefixTuningConfig,
    PromptEncoderConfig,
    PromptLearningConfig,
    PromptTuningConfig,
    get_peft_model,
    get_peft_model_state_dict,
    prepare_model_for_int8_training,
)
from peft.tuners.lora import LoraLayer
from peft.utils import _get_submodules, infer_device

from .testing_utils import get_state_dict


CONFIG_TESTING_KWARGS = (
    # IA³
    {
        "target_modules": None,
        "feedforward_modules": None,
    },
    # LoRA
    {
        "r": 8,
        "lora_alpha": 32,
        "target_modules": None,
        "lora_dropout": 0.05,
        "bias": "none",
    },
    # prefix tuning
    {
        "num_virtual_tokens": 10,
    },
    # prompt encoder
    {
        "num_virtual_tokens": 10,
        "encoder_hidden_size": 32,
    },
    # prompt tuning
    {
        "num_virtual_tokens": 10,
    },
    # AdaLoRA
    {
        "target_modules": None,
    },
)

CLASSES_MAPPING = {
    "ia3": (IA3Config, CONFIG_TESTING_KWARGS[0]),
    "lora": (LoraConfig, CONFIG_TESTING_KWARGS[1]),
    "prefix_tuning": (PrefixTuningConfig, CONFIG_TESTING_KWARGS[2]),
    "prompt_encoder": (PromptEncoderConfig, CONFIG_TESTING_KWARGS[3]),
    "prompt_tuning": (PromptTuningConfig, CONFIG_TESTING_KWARGS[4]),
    "adalora": (AdaLoraConfig, CONFIG_TESTING_KWARGS[5]),
}


# Adapted from https://github.com/huggingface/transformers/blob/48327c57182fdade7f7797d1eaad2d166de5c55b/src/transformers/activations.py#LL166C7-L166C22
class ClassInstantier(OrderedDict):
    def __getitem__(self, key, *args, **kwargs):
        # check if any of the kwargs is inside the config class kwargs
        if any(kwarg in self[key][1] for kwarg in kwargs):
            new_config_kwargs = self[key][1].copy()
            new_config_kwargs.update(kwargs)
            return (self[key][0], new_config_kwargs)

        return super().__getitem__(key, *args, **kwargs)

    def get_grid_parameters(self, grid_parameters, filter_params_func=None):
        r"""
        Returns a list of all possible combinations of the parameters in the config classes.

        Args:
            grid_parameters (`dict`):
                A dictionary containing the parameters to be tested. There should be at least the key "model_ids",
                which contains a list of model ids to be tested. The other keys should be the name of the config
                class post-fixed with "_kwargs", and the value should be a dictionary containing the parameters to
                be tested for that config class.
            filter_params_func (`callable`, `optional`):
                A function that takes a list of tuples and returns a list of tuples. This function is used to filter
                out the tests that need, for example, to be skipped.

        Returns:
            generated_tests (`list`):
                A list of tuples containing the name of the test, the model id, the config class and the config class
                kwargs.
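
        Example (illustrative sketch, not a fixture from this repo; the model id and kwarg grid are placeholders):

            >>> grid = {
            ...     "model_ids": ["hf-internal-testing/tiny-random-OPTForCausalLM"],
            ...     "task_type": "CAUSAL_LM",
            ...     "lora_kwargs": {"r": [4, 8]},
            ... }
            >>> tests = PeftTestConfigManager.get_grid_parameters(grid)
            >>> # yields one tuple per generated config, e.g. for the LoRA entry with r=8:
            >>> # ("test_hf-internal-testing/tiny-random-OPTForCausalLM_lora", model_id, LoraConfig, {"r": 8, ...})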
""" generated_tests = [] model_list = grid_parameters["model_ids"] task_type = grid_parameters["task_type"] if "task_type" in grid_parameters else None for model_id in model_list: for key, value in self.items(): if "{}_kwargs".format(key) in grid_parameters: peft_configs = [] current_peft_config = value[1].copy() for current_key, current_value in grid_parameters[f"{key}_kwargs"].items(): for kwarg in current_value: current_peft_config.update({current_key: kwarg}) if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs.append(current_peft_config.copy()) else: current_peft_config = value[1].copy() if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs = [current_peft_config] for peft_config in peft_configs: generated_tests.append((f"test_{model_id}_{key}", model_id, value[0], peft_config)) if filter_params_func is not None: generated_tests = filter_params_func(generated_tests) return generated_tests PeftTestConfigManager = ClassInstantier(CLASSES_MAPPING) class PeftCommonTester: r""" A large testing suite for testing common functionality of the PEFT models. Attributes: torch_device (`torch.device`): The device on which the tests will be run. transformers_class (`transformers.PreTrainedModel`): The transformers class that is being tested. """ torch_device = infer_device() transformers_class = None def prepare_inputs_for_common(self): raise NotImplementedError def check_modelcard(self, tmp_dirname, model): # check the generated README.md filename = os.path.join(tmp_dirname, "README.md") self.assertTrue(os.path.exists(filename)) with open(filename, "r", encoding="utf-8") as f: readme = f.read() metainfo = re.search(r"---\n(.*?)\n---", readme, re.DOTALL).group(1) dct = yaml.safe_load(metainfo) self.assertEqual(dct["library_name"], "peft") if hasattr(model, "config"): self.assertEqual(dct["base_model"], model.config.to_dict()["_name_or_path"]) else: # a custom model self.assertTrue("base_model" not in dct) def check_config_json(self, tmp_dirname, model): # check the generated config.json filename = os.path.join(tmp_dirname, "adapter_config.json") self.assertTrue(os.path.exists(filename)) with open(filename, "r", encoding="utf-8") as f: config = json.load(f) if hasattr(model, "config"): # custom models don't have a config attribute self.assertEqual(config["base_model_name_or_path"], model.config.to_dict()["_name_or_path"]) def _test_model_attr(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) self.assertTrue(hasattr(model, "save_pretrained")) self.assertTrue(hasattr(model, "from_pretrained")) self.assertTrue(hasattr(model, "push_to_hub")) def _test_adapter_name(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter") correctly_converted = False for n, _ in model.named_parameters(): if "test-adapter" in n: correctly_converted = True break self.assertTrue(correctly_converted) def _test_prepare_for_training(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) dummy_input = self.prepare_inputs_for_testing() 
        dummy_output = model.get_input_embeddings()(dummy_input["input_ids"])

        self.assertFalse(dummy_output.requires_grad)

        # load with `prepare_model_for_int8_training`
        model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
        model = prepare_model_for_int8_training(model)

        for param in model.parameters():
            self.assertFalse(param.requires_grad)

        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)

        # For backward compatibility
        if hasattr(model, "enable_input_require_grads"):
            model.enable_input_require_grads()
        else:

            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)

            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        dummy_input = self.prepare_inputs_for_testing()
        dummy_output = model.get_input_embeddings()(dummy_input["input_ids"])

        self.assertTrue(dummy_output.requires_grad)

    def _test_save_pretrained(self, model_id, config_cls, config_kwargs, safe_serialization=True):
        # ensure that the weights are randomly initialized
        if issubclass(config_cls, LoraConfig):
            config_kwargs = config_kwargs.copy()
            config_kwargs["init_lora_weights"] = False
        if issubclass(config_cls, IA3Config):
            config_kwargs = config_kwargs.copy()
            config_kwargs["init_ia3_weights"] = False

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            if safe_serialization:
                model.save_pretrained(tmp_dirname)
            else:
                model.save_pretrained(tmp_dirname, safe_serialization=False)

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            # check if the state dicts are equal
            if issubclass(config_cls, PromptEncoderConfig):
                # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load
                # adapter-specific weights for comparison.
                # TODO: is this expected?
                state_dict = get_peft_model_state_dict(model, unwrap_compiled=True)
                state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True)
            else:
                state_dict = get_state_dict(model, unwrap_compiled=True)
                state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True)

            # check if tensors equal
            for key in state_dict.keys():
                self.assertTrue(
                    torch.allclose(
                        state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                    )
                )

            target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin"

            # check if `adapter_model.safetensors` is present
            self.assertTrue(os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)))

            # check if `adapter_config.json` is present
            self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")))

            # check if `model.safetensors` is not present
            self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "model.safetensors")))

            # check if `config.json` is not present
            self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json")))

            self.check_modelcard(tmp_dirname, model)
            self.check_config_json(tmp_dirname, model)

    def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs, safe_serialization=True):
        if issubclass(config_cls, AdaLoraConfig):
            # AdaLora does not support adding more than 1 adapter
            return

        # ensure that the weights are randomly initialized
        if issubclass(config_cls, LoraConfig):
            config_kwargs = config_kwargs.copy()
            config_kwargs["init_lora_weights"] = False
        if issubclass(config_cls, IA3Config):
            config_kwargs = config_kwargs.copy()
            config_kwargs["init_ia3_weights"] = False

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        new_adapter_config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model.add_adapter("new_adapter", new_adapter_config)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            if safe_serialization:
                model.save_pretrained(tmp_dirname)
            else:
                model.save_pretrained(tmp_dirname, safe_serialization=False)

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            new_adapter_dir = os.path.join(tmp_dirname, "new_adapter")
            model_from_pretrained.load_adapter(new_adapter_dir, "new_adapter")

            # check if the state dicts are equal
            if issubclass(config_cls, PromptEncoderConfig):
                # For prompt encoding, when loading the whole state_dict, there are differences, therefore, only load
                # adapter-specific weights for comparison.
                # TODO: is this expected?
                state_dict = get_peft_model_state_dict(model, unwrap_compiled=True)
                state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained, unwrap_compiled=True)
            else:
                state_dict = get_state_dict(model, unwrap_compiled=True)
                state_dict_from_pretrained = get_state_dict(model_from_pretrained, unwrap_compiled=True)

            # check if same keys
            self.assertEqual(state_dict.keys(), state_dict_from_pretrained.keys())

            # check if tensors equal
            for key in state_dict.keys():
                self.assertTrue(
                    torch.allclose(
                        state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device)
                    )
                )

            target_adapter_filename = "adapter_model.safetensors" if safe_serialization else "adapter_model.bin"

            # check if `adapter_model.safetensors` is present
            self.assertTrue(os.path.exists(os.path.join(tmp_dirname, target_adapter_filename)))
            self.assertTrue(os.path.exists(os.path.join(new_adapter_dir, target_adapter_filename)))

            # check if `adapter_config.json` is present
            self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json")))
            self.assertTrue(os.path.exists(os.path.join(new_adapter_dir, "adapter_config.json")))

            # check if `model.safetensors` is not present
            self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "model.safetensors")))
            self.assertFalse(os.path.exists(os.path.join(new_adapter_dir, "model.safetensors")))

            # check if `config.json` is not present
            self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json")))
            self.assertFalse(os.path.exists(os.path.join(new_adapter_dir, "config.json")))

            self.check_modelcard(tmp_dirname, model)
            self.check_config_json(tmp_dirname, model)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname, selected_adapters=["default"])

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname)

            self.assertTrue("default" in model_from_pretrained.peft_config.keys())
            self.assertTrue("new_adapter" not in model_from_pretrained.peft_config.keys())

    def _test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs):
        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(base_model_name_or_path=model_id, **config_kwargs)
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(
                model_from_pretrained, tmp_dirname, is_trainable=False, config=config
            )
            self.assertTrue(model_from_pretrained.peft_config["default"].inference_mode)
            self.assertIs(model_from_pretrained.peft_config["default"], config)

    def _test_merge_layers_fp16(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig,):
            # Merge layers only supported for LoRA and IA³
            return
        if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
            self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)")

        model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.float16)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(device="cpu", dtype=torch.float16)

        model.eval()

        # This should simply work
        _ = model.merge_and_unload()

    def _test_merge_layers_nan(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig, IA3Config, AdaLoraConfig):
            # Merge layers only supported for LoRA, IA³ and AdaLoRA
            return
        if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
            self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)")

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        dummy_input = self.prepare_inputs_for_testing()

        model.eval()

        # This should work
        logits_unmerged = model(**dummy_input)[0]

        model = model.merge_and_unload()
        logits_merged = model(**dummy_input)[0]

        self.assertTrue(torch.allclose(logits_unmerged, logits_merged, atol=1e-3, rtol=1e-3))

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        for name, module in model.named_parameters():
            if "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name:
                module.data[0] = torch.nan

        with self.assertRaises(ValueError) as error_context:
            model = model.merge_and_unload(safe_merge=True)

        self.assertEqual(
            str(error_context.exception),
            "NaNs detected in the merged weights. The adapter default seems to be broken",
        )

        for name, module in model.named_parameters():
            if "lora_A" in name or "ia3" in name or "lora_E" in name or "lora_B" in name:
                module.data[0] = torch.inf

        with self.assertRaises(ValueError) as error_context:
            model = model.merge_and_unload(safe_merge=True)

        self.assertEqual(
            str(error_context.exception),
            "NaNs detected in the merged weights. The adapter default seems to be broken",
        )

    def _test_merge_layers(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig, IA3Config):
            # Merge layers only supported for LoRA and IA³
            return
        if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig):
            self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)")

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        if config.peft_type not in ("IA3", "LORA"):
            with self.assertRaises(AttributeError):
                model = model.merge_and_unload()

        dummy_input = self.prepare_inputs_for_testing()
        model.eval()
        logits = model(**dummy_input)[0]

        model.merge_adapter()
        logits_merged = model(**dummy_input)[0]
        model.unmerge_adapter()
        logits_unmerged = model(**dummy_input)[0]

        model = model.merge_and_unload()
        logits_merged_unloaded = model(**dummy_input)[0]

        atol, rtol = 1e-4, 1e-4
        if (config.peft_type == "IA3") and (model_id == "Conv2d"):
            # for some reason, the IA³ Conv2d introduces a larger error
            atol, rtol = 0.3, 0.01
        self.assertTrue(torch.allclose(logits, logits_merged, atol=atol, rtol=rtol))
        self.assertTrue(torch.allclose(logits, logits_unmerged, atol=atol, rtol=rtol))
        self.assertTrue(torch.allclose(logits, logits_merged_unloaded, atol=atol, rtol=rtol))

        # For this test to work, weights should not be initialized to identity transform (e.g.
        # init_lora_weights should be False).
        transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
        logits_transformers = transformers_model(**dummy_input)[0]
        self.assertFalse(torch.allclose(logits_merged, logits_transformers, atol=1e-10, rtol=1e-10))

        # test that the logits are identical after a save-load-roundtrip
        if hasattr(model, "save_pretrained"):
            # model is a transformers model
            with tempfile.TemporaryDirectory() as tmp_dirname:
                model.save_pretrained(tmp_dirname)
                model_from_pretrained = self.transformers_class.from_pretrained(tmp_dirname).to(self.torch_device)
        else:
            # model is not a transformers model
            model_from_pretrained = pickle.loads(pickle.dumps(model))

        logits_merged_from_pretrained = model_from_pretrained(**dummy_input)[0]
        self.assertTrue(torch.allclose(logits_merged, logits_merged_from_pretrained, atol=atol, rtol=rtol))

    def _test_merge_layers_multi(self, model_id, config_cls, config_kwargs):
        supported_peft_types = [PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT]

        if ("gpt2" in model_id.lower()) and (config_cls == IA3Config):
            self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)")

        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )

        if config.peft_type not in supported_peft_types:
            return

        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        dummy_input = self.prepare_inputs_for_testing()
        model.eval()

        with torch.inference_mode():
            logits_adapter_1 = model(**dummy_input)[0]

        model.add_adapter("adapter-2", config)
        model.set_adapter("adapter-2")
        model.eval()

        with torch.inference_mode():
            logits_adapter_2 = model(**dummy_input)[0]

        self.assertFalse(torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3))

        model.set_adapter("default")

        with torch.inference_mode():
            logits_adapter_1_after_set = model(**dummy_input)[0]

        self.assertTrue(torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3))

        model_copy = copy.deepcopy(model)
        model_copy_2 = copy.deepcopy(model)
        model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"])

        with torch.inference_mode():
            logits_merged_all = model_merged_all(**dummy_input)[0]

        self.assertFalse(torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3))
        self.assertFalse(torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3))

        model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"])

        with torch.inference_mode():
            logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0]

        self.assertTrue(torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3))

        model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"])

        with torch.inference_mode():
            logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0]

        self.assertTrue(torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3))

    def _test_generate(self, model_id, config_cls, config_kwargs):
        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        inputs = self.prepare_inputs_for_testing()

        # check if `generate` works
        _ = model.generate(**inputs)

        with self.assertRaises(TypeError):
            # check if `generate` raises an error if positional arguments are passed
            _ = model.generate(inputs["input_ids"])

    def _test_generate_half_prec(self, model_id, config_cls, config_kwargs):
        if config_cls not in (IA3Config, LoraConfig, PrefixTuningConfig):
            return

        model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device)
        attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        # check if `generate` works
        _ = model.generate(input_ids=input_ids, attention_mask=attention_mask)

        with self.assertRaises(TypeError):
            # check if `generate` raises an error if positional arguments are passed
            _ = model.generate(input_ids, attention_mask=attention_mask)

    def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs):
        if config_cls not in (PrefixTuningConfig,):
            return

        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )

        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config)
        model = model.half()

        self.assertEqual(model.base_model_torch_dtype, torch.float16)

    def _test_training(self, model_id, config_cls, config_kwargs):
        if config_cls not in (IA3Config, LoraConfig):
            return

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        inputs = self.prepare_inputs_for_testing()

        # check if `training` works
        output = model(**inputs)[0]
        loss = output.sum()
        loss.backward()
        parameter_prefix = "ia3" if config_cls == IA3Config else "lora"
        for n, param in model.named_parameters():
            if (parameter_prefix in n) or ("modules_to_save" in n):
                self.assertIsNotNone(param.grad)
            else:
                self.assertIsNone(param.grad)

    def _test_inference_safetensors(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig,):
            return

        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        inputs = self.prepare_inputs_for_testing()

        # check if `training` works
        output = model(**inputs)[0]
        logits = output[0]

        loss = output.sum()
        loss.backward()

        # set to eval mode, since things like dropout can affect the output otherwise
        model.eval()
        logits = model(**inputs)[0][0]

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname, safe_serialization=True)
            self.assertTrue("adapter_model.safetensors" in os.listdir(tmp_dirname))
            self.assertTrue("adapter_model.bin" not in os.listdir(tmp_dirname))

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device)

            logits_from_pretrained = model_from_pretrained(**inputs)[0][0]
            self.assertTrue(torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4))

    def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig,):
            return

        config = config_cls(
            base_model_name_or_path=model_id,
            layers_to_transform=[0],
            **config_kwargs,
        )
        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        inputs = self.prepare_inputs_for_testing()

        # check if `training` works
        output = model(**inputs)[0]
        logits = output[0]

        loss = output.sum()
        loss.backward()

        nb_trainable = 0
        for n, param in model.named_parameters():
            if "lora" in n:
                self.assertIsNotNone(param.grad)
                nb_trainable += 1
            else:
                self.assertIsNone(param.grad)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device)

            logits_from_pretrained = model_from_pretrained(**inputs)[0][0]
            self.assertTrue(torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4))

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        nb_trainable_all = 0
        for n, param in model.named_parameters():
            if "lora" in n:
                nb_trainable_all += 1

        self.assertLess(nb_trainable, nb_trainable_all)

    def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig, IA3Config):
            return

        model = self.transformers_class.from_pretrained(model_id)

        if not getattr(model, "supports_gradient_checkpointing", False):
            return

        model.gradient_checkpointing_enable()

        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        inputs = self.prepare_inputs_for_testing()

        # check if `training` works
        output = model(**inputs)[0]

        loss = output.sum()
        loss.backward()

        parameter_prefix = "ia3" if config_cls == IA3Config else "lora"
        for n, param in model.named_parameters():
            if parameter_prefix in n:
                self.assertIsNotNone(param.grad)
            else:
                self.assertIsNone(param.grad)

    def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs):
        if config_cls not in (LoraConfig,):
            return

        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )

        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname)

            model_from_pretrained = self.transformers_class.from_pretrained(model_id)
            _ = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname, device_map={"": "cpu"}).to(
                self.torch_device
            )

    def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs):
        if not issubclass(config_cls, PromptLearningConfig):
            return

        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        inputs = self.prepare_inputs_for_testing()

        # check if `training` works
        output = model(**inputs)[0]

        loss = output.sum()
        loss.backward()

        # check that prompt encoder has grads
        for param in model.prompt_encoder.parameters():
            self.assertIsNotNone(param.grad)

    def _test_delete_adapter(self, model_id, config_cls, config_kwargs):
        supported_peft_types = [PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT]
        # AdaLora does not support multiple adapters, so it is not in the list
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        if config.peft_type not in supported_peft_types:
            return

        model = self.transformers_class.from_pretrained(model_id)
        adapter_to_delete = "delete_me"
        model = get_peft_model(model, config)
        model.add_adapter(adapter_to_delete, config)
        model.set_adapter(adapter_to_delete)
        model = model.to(self.torch_device)
        model.delete_adapter(adapter_to_delete)
        self.assertFalse(adapter_to_delete in model.peft_config)
        self.assertEqual(model.active_adapters, ["default"])

        key_list = [key for key, _ in model.named_modules()]
        for key in key_list:
            _, target, _ = _get_submodules(model, key)
            attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", [])
            for attr in attributes_to_check:
                self.assertFalse(adapter_to_delete in getattr(target, attr))

        # check that we can also delete the last remaining adapter
        model.delete_adapter("default")
        self.assertFalse("default" in model.peft_config)
        self.assertEqual(model.active_adapters, [])

        input = self.prepare_inputs_for_testing()
        # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter
        model.base_model(**input)  # should not raise an error

    def _test_delete_inactive_adapter(self, model_id, config_cls, config_kwargs):
        # same as test_delete_adapter, but this time an inactive adapter is deleted
        supported_peft_types = [PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.IA3, PeftType.OFT]
        # AdaLora does not support multiple adapters, so it is not in the list
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        if config.peft_type not in supported_peft_types:
            return

        model = self.transformers_class.from_pretrained(model_id)
        adapter_to_delete = "delete_me"
        model = get_peft_model(model, config)
        model.add_adapter(adapter_to_delete, config)
        # "delete_me" is added but not activated
        model = model.to(self.torch_device)

        model.delete_adapter(adapter_to_delete)
        self.assertFalse(adapter_to_delete in model.peft_config)
        self.assertEqual(model.active_adapters, ["default"])

        key_list = [key for key, _ in model.named_modules()]
        for key in key_list:
            _, target, _ = _get_submodules(model, key)
            attributes_to_check = getattr(target, "adapter_layer_names", []) + getattr(target, "other_param_names", [])
            for attr in attributes_to_check:
                self.assertFalse(adapter_to_delete in getattr(target, attr))

        # check that we can also delete the last remaining adapter
        model.delete_adapter("default")
        self.assertFalse("default" in model.peft_config)
        self.assertEqual(model.active_adapters, [])

        input = self.prepare_inputs_for_testing()
        # note: we cannot call model(**input) because PeftModel always expects there to be at least one adapter
        model.base_model(**input)  # should not raise an error

    def _test_unload_adapter(self, model_id, config_cls, config_kwargs):
        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config)
        model = model.to(self.torch_device)

        if config.peft_type not in ("LORA", "ADALORA", "IA3"):
            with self.assertRaises(AttributeError):
                model = model.unload()
        else:
            dummy_input = self.prepare_inputs_for_testing()
            logits_with_adapter = model(**dummy_input)[0]

            transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
            logits_transformers = transformers_model(**dummy_input)[0]

            model.eval()
            model = model.unload()
            logits_unload = model(**dummy_input)[0]

            self.assertFalse(torch.allclose(logits_with_adapter, logits_unload, atol=1e-10, rtol=1e-10))
            self.assertTrue(torch.allclose(logits_transformers, logits_unload, atol=1e-4, rtol=1e-4))

    def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs):
        if issubclass(config_cls, AdaLoraConfig):
            # AdaLora does not support adding more than 1 adapter
            return
        adapter_list = ["adapter1", "adapter_2", "adapter_3"]
        weight_list = [0.5, 1.5, 1.5]
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        if not isinstance(config, (LoraConfig)):
            return

        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config, adapter_list[0])
        model.add_adapter(adapter_list[1], config)
        model.add_adapter(adapter_list[2], replace(config, r=20))
        model = model.to(self.torch_device)

        # test re-weighting single adapter
        model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting")

        # test svd re-weighting with multiple adapters
        model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_svd_reweighting")

        # test cat re-weighting with multiple adapters
        model.add_weighted_adapter(
            adapter_list[1:], weight_list[1:], "multi_adapter_cat_reweighting", combination_type="cat"
        )

        # test linear re-weighting with multiple adapters
        model.add_weighted_adapter(
            adapter_list[:2], weight_list[:2], "multi_adapter_linear_reweighting", combination_type="linear"
        )

        # test linear re-weighting with multiple adapters with only first adapter having non zero weight
        model.add_weighted_adapter(
            adapter_list[:2],
            [weight_list[0], 0],
            "multi_adapter_linear_reweighting_single_enabled",
            combination_type="linear",
        )

        with self.assertRaises(ValueError):
            model.add_weighted_adapter(
                adapter_list[1:],
                weight_list[1:],
                "multi_adapter_linear_reweighting_uneven_r",
                combination_type="linear",
            )

        new_adapters = [
            "single_adapter_reweighting",
            "multi_adapter_svd_reweighting",
            "multi_adapter_cat_reweighting",
            "multi_adapter_linear_reweighting",
            "multi_adapter_linear_reweighting_single_enabled",
        ]
        for new_adapter in new_adapters:
            self.assertTrue(new_adapter in model.peft_config)

        key_list = [key for key, _ in model.named_modules()]
        for key in key_list:
            _, target, _ = _get_submodules(model, key)
            if isinstance(target, LoraLayer):
                for adapter_name in new_adapters:
                    if "single" in adapter_name:
                        new_delta_weight = target.get_delta_weight(adapter_name)
                        weighted_original_delta_weights = target.get_delta_weight(adapter_list[0]) * weight_list[0]
                        self.assertTrue(
                            torch.allclose(new_delta_weight, weighted_original_delta_weights, atol=1e-4, rtol=1e-4)
                        )
                    elif "svd" in adapter_name:
                        self.assertTrue(target.r[adapter_name] == 20)
                    elif "linear" in adapter_name:
                        self.assertTrue(target.r[adapter_name] == 8)
                    elif "cat" in adapter_name:
                        self.assertTrue(target.r[adapter_name] == 28)

        dummy_input = self.prepare_inputs_for_testing()
        model.eval()
        for adapter_name in new_adapters:
            # ensuring new adapters pass the forward loop
            model.set_adapter(adapter_name)
            self.assertTrue(model.active_adapter == adapter_name)
            self.assertTrue(model.active_adapters == [adapter_name])
            model(**dummy_input)[0]

    def _test_disable_adapter(self, model_id, config_cls, config_kwargs):
        task_type = config_kwargs.get("task_type")
        if (task_type == "SEQ_2_SEQ_LM") and (config_cls in (PromptTuningConfig, PromptEncoderConfig)):
            self.skipTest("Seq2Seq + prompt tuning/prompt encoder does not work with disabling adapters")

        def get_output(model):
            # helper function that works with different model types
            torch.manual_seed(0)

            if hasattr(model, "generate"):
                # let's check the scores, not the output ids, since the latter can easily be identical even if the
                # weights are slightly changed
                output = model.generate(**input, return_dict_in_generate=True, output_scores=True).scores[0]
                # take element 0, as output is a tuple
            else:
                output = model(**input)

                if hasattr(output, "images"):  # for SD
                    import numpy as np
                    img = output.images[0]
                    return torch.from_numpy(np.array(img))

            return output

        # initialize model
        model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)

        # output from BASE MODEL
        input = self.prepare_inputs_for_testing()
        output_before = get_output(model)

        # output from PEFT MODEL
        if hasattr(self, "instantiate_sd_peft"):
            # SD models are instantiated differently
            peft_model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs)
        else:
            config = config_cls(
                base_model_name_or_path=model_id,
                **config_kwargs,
            )
            peft_model = get_peft_model(model, config)

        output_peft = get_output(peft_model)

        # sanity check: the PEFT model's output should differ from the base model's; for this to work,
        # init_lora_weights must be False
        if isinstance(peft_model, StableDiffusionPipeline):
            # for SD, check that most pixels have different values
            self.assertTrue((output_before != output_peft).float().mean() > 0.8)
        else:
            self.assertFalse(torch.allclose(output_before, output_peft))

        # output with DISABLED ADAPTER
        if isinstance(peft_model, StableDiffusionPipeline):
            with peft_model.unet.disable_adapter():
                with peft_model.text_encoder.disable_adapter():
                    output_peft_disabled = get_output(peft_model)
            # for SD, very rarely, a pixel can differ
            self.assertTrue((output_before != output_peft_disabled).float().mean() < 1e-4)
        else:
            with peft_model.disable_adapter():
                output_peft_disabled = get_output(peft_model)
            self.assertTrue(torch.allclose(output_before, output_peft_disabled, atol=1e-6, rtol=1e-6))

        # TODO: add tests to check if disabling adapters works after calling merge_adapter

    def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs):
        # When trying to add multiple adapters with bias in Lora or AdaLora, an error should be
        # raised. Also, the peft model should not be left in a half-initialized state.
        if not issubclass(config_cls, (LoraConfig, AdaLoraConfig)):
            return

        config_kwargs = config_kwargs.copy()
        config_kwargs["bias"] = "all"
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )

        model = self.transformers_class.from_pretrained(model_id)
        model = get_peft_model(model, config, "adapter0")
        with self.assertRaises(ValueError):
            model.add_adapter("adapter1", replace(config, r=20))

        # (superficial) test that the model is not left in a half-initialized state when adding an adapter fails
        self.assertFalse("adapter1" in model.peft_config)
        self.assertFalse("adapter1" in model.base_model.peft_config)

    def _test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
        # https://github.com/huggingface/peft/issues/727
        model = self.transformers_class.from_pretrained(model_id)
        config = config_cls(
            base_model_name_or_path=model_id,
            **config_kwargs,
        )
        model = get_peft_model(model, config, adapter_name="test-adapter").to(self.torch_device)
        dummy_input = self.prepare_inputs_for_testing()
        inputs_embeds = model.get_input_embeddings()(dummy_input["input_ids"])

        # just check that no error is raised
        model.forward(inputs_embeds=inputs_embeds)
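
Illustrative usage (editor's sketch, not a file from this repo): a concrete test class combines `unittest.TestCase` with `PeftCommonTester`, sets `transformers_class`, and implements `prepare_inputs_for_testing`, which the `_test_*` helpers above call to obtain dummy inputs. The model id and token values below are placeholders.

import unittest

import torch
from transformers import AutoModelForCausalLM

from .testing_common import PeftCommonTester, PeftTestConfigManager


class ExampleDecoderModelTester(unittest.TestCase, PeftCommonTester):
    # the transformers auto class used by PeftCommonTester.from_pretrained calls
    transformers_class = AutoModelForCausalLM

    def prepare_inputs_for_testing(self):
        # dummy inputs consumed by the _test_* helpers; token ids are arbitrary
        input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
        attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
        return {"input_ids": input_ids, "attention_mask": attention_mask}

    def test_adapter_name_lora(self):
        # indexing the manager with no extra kwargs returns (config class, default kwargs)
        config_cls, config_kwargs = PeftTestConfigManager["lora"]
        # placeholder model id; any tiny causal LM checkpoint works
        self._test_adapter_name("hf-internal-testing/tiny-random-OPTForCausalLM", config_cls, config_kwargs)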
hf_public_repos/peft/tests/test_common_gpu.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest

import pytest
import torch
import torch.nn.functional as F
from transformers import (
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoModelForTokenClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    LlamaForCausalLM,
    WhisperForConditionalGeneration,
)

from peft import (
    AdaptionPromptConfig,
    IA3Config,
    LoraConfig,
    PeftModel,
    TaskType,
    get_peft_model,
    prepare_model_for_kbit_training,
)
from peft.import_utils import is_bnb_4bit_available, is_bnb_available

from .testing_utils import require_bitsandbytes, require_torch_gpu, require_torch_multi_gpu


if is_bnb_available():
    import bitsandbytes as bnb

    from peft.tuners.ia3 import Linear8bitLt as IA3Linear8bitLt
    from peft.tuners.lora import Linear8bitLt as LoraLinear8bitLt

if is_bnb_4bit_available():
    from peft.tuners.ia3 import Linear4bit as IA3Linear4bit
    from peft.tuners.lora import Linear4bit as LoraLinear4bit


@require_torch_gpu
class PeftGPUCommonTests(unittest.TestCase):
    r"""
    A common tester to run common operations that are performed on GPU such as generation, loading in 8bit, etc.
    """

    def setUp(self):
        self.seq2seq_model_id = "google/flan-t5-base"
        self.causal_lm_model_id = "facebook/opt-350m"
        self.audio_model_id = "openai/whisper-large"
        if torch.cuda.is_available():
            self.device = torch.device("cuda:0")

    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test.
        Based on https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()

    @require_bitsandbytes
    @pytest.mark.multi_gpu_tests
    @pytest.mark.single_gpu_tests
    def test_lora_bnb_8bit_quantization(self):
        r"""
        Test that 8bit quantization using LoRA works as expected.
        """
        whisper_8bit = WhisperForConditionalGeneration.from_pretrained(
            self.audio_model_id,
            device_map="auto",
            load_in_8bit=True,
        )

        opt_8bit = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            device_map="auto",
            load_in_8bit=True,
        )

        flan_8bit = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq2seq_model_id,
            device_map="auto",
            load_in_8bit=True,
        )

        flan_lora_config = LoraConfig(
            r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM"
        )

        opt_lora_config = LoraConfig(
            r=16,
            lora_alpha=32,
            target_modules=["q_proj", "v_proj"],
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )

        config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none")

        flan_8bit = get_peft_model(flan_8bit, flan_lora_config)
        self.assertTrue(
            isinstance(flan_8bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, LoraLinear8bitLt)
        )

        opt_8bit = get_peft_model(opt_8bit, opt_lora_config)
        self.assertTrue(
            isinstance(opt_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear8bitLt)
        )

        whisper_8bit = get_peft_model(whisper_8bit, config)
        self.assertTrue(
            isinstance(whisper_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear8bitLt)
        )

    @require_bitsandbytes
    @pytest.mark.multi_gpu_tests
    @pytest.mark.single_gpu_tests
    def test_ia3_bnb_8bit_quantization(self):
        r"""
        Test that 8bit quantization using IA³ works as expected.
        """
        whisper_8bit = WhisperForConditionalGeneration.from_pretrained(
            self.audio_model_id,
            device_map="auto",
            load_in_8bit=True,
        )

        opt_8bit = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            device_map="auto",
            load_in_8bit=True,
        )

        flan_8bit = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq2seq_model_id,
            device_map="auto",
            load_in_8bit=True,
        )

        flan_ia3_config = IA3Config(target_modules=["q", "v"], task_type="SEQ_2_SEQ_LM")

        opt_ia3_config = IA3Config(
            target_modules=["q_proj", "v_proj", "fc2"],
            feedforward_modules=["fc2"],
            task_type="CAUSAL_LM",
        )

        config = IA3Config(target_modules=["q_proj", "v_proj", "fc2"], feedforward_modules=["fc2"])

        flan_8bit = get_peft_model(flan_8bit, flan_ia3_config)
        self.assertTrue(
            isinstance(flan_8bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, IA3Linear8bitLt)
        )

        opt_8bit = get_peft_model(opt_8bit, opt_ia3_config)
        self.assertTrue(
            isinstance(opt_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear8bitLt)
        )

        whisper_8bit = get_peft_model(whisper_8bit, config)
        self.assertTrue(
            isinstance(whisper_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear8bitLt)
        )

    @require_bitsandbytes
    @pytest.mark.multi_gpu_tests
    @pytest.mark.single_gpu_tests
    def test_lora_bnb_4bit_quantization_from_pretrained_safetensors(self):
        r"""
        Test that 4bit quantization using LoRA works as expected with safetensors weights.
""" model_id = "facebook/opt-350m" peft_model_id = "ybelkada/test-st-lora" model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True) model = PeftModel.from_pretrained(model, peft_model_id) _ = model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0)) @require_bitsandbytes @pytest.mark.multi_gpu_tests @pytest.mark.single_gpu_tests def test_lora_bnb_4bit_quantization(self): r""" Test that tests if the 4bit quantization using LoRA works as expected """ whisper_4bit = WhisperForConditionalGeneration.from_pretrained( self.audio_model_id, device_map="auto", load_in_4bit=True, ) opt_4bit = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", load_in_4bit=True, ) flan_4bit = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, device_map="auto", load_in_4bit=True, ) flan_lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) opt_lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") flan_4bit = get_peft_model(flan_4bit, flan_lora_config) self.assertTrue( isinstance(flan_4bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, LoraLinear4bit) ) opt_4bit = get_peft_model(opt_4bit, opt_lora_config) self.assertTrue(isinstance(opt_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear4bit)) whisper_4bit = get_peft_model(whisper_4bit, config) self.assertTrue( isinstance(whisper_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear4bit) ) @require_bitsandbytes @pytest.mark.multi_gpu_tests @pytest.mark.single_gpu_tests def test_ia3_bnb_4bit_quantization(self): r""" Test that tests if the 4bit quantization using IA3 works as expected """ whisper_4bit = WhisperForConditionalGeneration.from_pretrained( self.audio_model_id, device_map="auto", load_in_4bit=True, ) opt_4bit = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", load_in_4bit=True, ) flan_4bit = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, device_map="auto", load_in_4bit=True, ) flan_ia3_config = IA3Config(target_modules=["q", "v"], task_type="SEQ_2_SEQ_LM") opt_ia3_config = IA3Config( target_modules=["q_proj", "v_proj", "fc2"], feedforward_modules=["fc2"], task_type="CAUSAL_LM", ) config = IA3Config(target_modules=["q_proj", "v_proj", "fc2"], feedforward_modules=["fc2"]) flan_4bit = get_peft_model(flan_4bit, flan_ia3_config) self.assertTrue( isinstance(flan_4bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, IA3Linear4bit) ) opt_4bit = get_peft_model(opt_4bit, opt_ia3_config) self.assertTrue(isinstance(opt_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear4bit)) whisper_4bit = get_peft_model(whisper_4bit, config) self.assertTrue( isinstance(whisper_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, IA3Linear4bit) ) @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_lora_causal_lm_multi_gpu_inference(self): r""" Test if LORA can be used for inference on multiple GPUs. 
""" lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map="balanced") tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) self.assertEqual(set(model.hf_device_map.values()), set(range(torch.cuda.device_count()))) model = get_peft_model(model, lora_config) self.assertTrue(isinstance(model, PeftModel)) dummy_input = "This is a dummy input:" input_ids = tokenizer(dummy_input, return_tensors="pt").input_ids.to(self.device) # this should work without any problem _ = model.generate(input_ids=input_ids) @require_torch_multi_gpu @pytest.mark.multi_gpu_tests @require_bitsandbytes def test_lora_seq2seq_lm_multi_gpu_inference(self): r""" Test if LORA can be used for inference on multiple GPUs - 8bit version. """ lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) model = AutoModelForSeq2SeqLM.from_pretrained(self.seq2seq_model_id, device_map="balanced", load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) self.assertEqual(set(model.hf_device_map.values()), set(range(torch.cuda.device_count()))) model = get_peft_model(model, lora_config) self.assertTrue(isinstance(model, PeftModel)) self.assertTrue(isinstance(model.base_model.model.encoder.block[0].layer[0].SelfAttention.q, LoraLinear8bitLt)) dummy_input = "This is a dummy input:" input_ids = tokenizer(dummy_input, return_tensors="pt").input_ids.to(self.device) # this should work without any problem _ = model.generate(input_ids=input_ids) @require_torch_multi_gpu @pytest.mark.multi_gpu_tests @require_bitsandbytes def test_adaption_prompt_8bit(self): model = LlamaForCausalLM.from_pretrained( "HuggingFaceM4/tiny-random-LlamaForCausalLM", load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", ) model = prepare_model_for_kbit_training(model) config = AdaptionPromptConfig( adapter_len=10, adapter_layers=2, task_type="CAUSAL_LM", ) model = get_peft_model(model, config) random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(0) _ = model(random_input) @require_torch_multi_gpu @pytest.mark.multi_gpu_tests @require_bitsandbytes def test_adaption_prompt_4bit(self): model = LlamaForCausalLM.from_pretrained( "HuggingFaceM4/tiny-random-LlamaForCausalLM", load_in_4bit=True, torch_dtype=torch.float16, device_map="auto", ) model = prepare_model_for_kbit_training(model) config = AdaptionPromptConfig( adapter_len=10, adapter_layers=2, task_type="CAUSAL_LM", ) model = get_peft_model(model, config) random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(0) _ = model(random_input) @require_torch_gpu @pytest.mark.single_gpu_tests @require_bitsandbytes def test_print_4bit_expected(self): EXPECTED_TRAINABLE_PARAMS = 294912 EXPECTED_ALL_PARAMS = 125534208 model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", load_in_4bit=True, ) config = LoraConfig( r=8, ) model = get_peft_model(model, config) trainable_params, all_params = model.get_nb_trainable_parameters() self.assertEqual(trainable_params, EXPECTED_TRAINABLE_PARAMS) self.assertEqual(all_params, EXPECTED_ALL_PARAMS) # test with double quant bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, ) model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", quantization_config=bnb_config, ) config = LoraConfig( r=8, ) model = get_peft_model(model, config) trainable_params, 
        trainable_params, all_params = model.get_nb_trainable_parameters()

        self.assertEqual(trainable_params, EXPECTED_TRAINABLE_PARAMS)
        self.assertEqual(all_params, EXPECTED_ALL_PARAMS)

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    @require_bitsandbytes
    def test_modules_to_save_grad(self):
        model_id = "bigscience/bloomz-560m"
        load_in_4bit = True

        model = AutoModelForSequenceClassification.from_pretrained(
            model_id,
            load_in_4bit=load_in_4bit,
            torch_dtype=torch.float32,
        )

        model = prepare_model_for_kbit_training(model)

        config = LoraConfig(
            r=16,
            lora_alpha=16,
            lora_dropout=0.05,
            bias="none",
            task_type="SEQ_CLS",
        )

        peft_model = get_peft_model(model, config)

        lm_head = peft_model.base_model.model.score
        original_module = lm_head.original_module
        modules_to_save = lm_head.modules_to_save.default

        inputs = torch.randn((1024))
        o1 = lm_head(inputs)
        o1.mean().backward()

        self.assertTrue(modules_to_save.weight.requires_grad is True)
        self.assertTrue(original_module.weight.grad is None)
        self.assertTrue(modules_to_save.weight.grad is not None)

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    @require_bitsandbytes
    def test_8bit_merge_lora(self):
        torch.manual_seed(1000)
        model = AutoModelForCausalLM.from_pretrained(
            "facebook/opt-125m",
            load_in_8bit=True,
        )
        random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
        out_base = F.softmax(model(random_input).logits, dim=-1)

        config = LoraConfig(
            r=8,
            init_lora_weights=False,
        )
        model = get_peft_model(model, config)

        with torch.inference_mode():
            out_before_merge = F.softmax(model(random_input).logits, dim=-1)

        model.merge_and_unload()
        with torch.inference_mode():
            out_after_merge = F.softmax(model(random_input).logits, dim=-1)

        atol = 0.01
        rtol = 10
        self.assertFalse(torch.allclose(out_base, out_before_merge, atol=atol, rtol=rtol))
        self.assertTrue(torch.allclose(out_before_merge, out_after_merge, atol=atol, rtol=rtol))
        self.assertTrue(isinstance(model, PeftModel))
        self.assertTrue(
            isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, bnb.nn.Linear8bitLt)
        )
        self.assertTrue(
            isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, bnb.nn.Linear8bitLt)
        )

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    @require_bitsandbytes
    def test_8bit_merge_and_disable_lora(self):
        torch.manual_seed(1000)
        model = AutoModelForCausalLM.from_pretrained(
            "facebook/opt-125m",
            load_in_8bit=True,
        )
        random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
        # compare outputs in probability space, because logits can have outliers
        # and token ids are not precise enough
        out_base = F.softmax(model(random_input).logits, dim=-1)

        config = LoraConfig(
            r=8,
            init_lora_weights=False,
        )
        model = get_peft_model(model, config)

        with torch.inference_mode():
            out_before = F.softmax(model(random_input).logits, dim=-1)

        model.merge_adapter()
        with model.disable_adapter():
            with torch.inference_mode():
                out_after = F.softmax(model(random_input).logits, dim=-1)

        atol = 0.01
        rtol = 10
        self.assertFalse(torch.allclose(out_base, out_before, atol=atol, rtol=rtol))
        self.assertTrue(torch.allclose(out_base, out_after, atol=atol, rtol=rtol))
        self.assertTrue(isinstance(model, PeftModel))
        self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, LoraLinear8bitLt))
        self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear8bitLt))

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    @require_bitsandbytes
    def test_4bit_merge_lora(self):
        torch.manual_seed(3000)
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=False,
            bnb_4bit_compute_dtype=torch.float32,
        )
        model = AutoModelForCausalLM.from_pretrained(
            "facebook/opt-125m",
            quantization_config=bnb_config,
            torch_dtype=torch.float32,
        )
        random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
        # compare outputs in probability space, because logits can have outliers
        # and token ids are not precise enough
        out_base = F.softmax(model(random_input).logits, dim=-1)

        config = LoraConfig(
            r=8,
            init_lora_weights=False,
        )
        model = get_peft_model(model, config)

        with torch.inference_mode():
            out_before_merge = F.softmax(model(random_input).logits, dim=-1)

        model.merge_and_unload()
        with torch.inference_mode():
            out_after_merge = F.softmax(model(random_input).logits, dim=-1)

        # tolerances are pretty high because some deviations are expected with quantization
        atol = 0.01
        rtol = 10
        self.assertFalse(torch.allclose(out_base, out_before_merge, atol=atol, rtol=rtol))
        self.assertTrue(torch.allclose(out_before_merge, out_after_merge, atol=atol, rtol=rtol))
        self.assertTrue(isinstance(model, PeftModel))
        self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, bnb.nn.Linear4bit))
        self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, bnb.nn.Linear4bit))

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    @require_bitsandbytes
    def test_4bit_merge_and_disable_lora(self):
        torch.manual_seed(3000)
        bnb_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=False,
            bnb_4bit_compute_dtype=torch.float32,
        )
        model = AutoModelForCausalLM.from_pretrained(
            "facebook/opt-125m",
            quantization_config=bnb_config,
            torch_dtype=torch.float32,
        )
        random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(model.device)
        # compare outputs in probability space, because logits can have outliers
        # and token ids are not precise enough
        out_base = F.softmax(model(random_input).logits, dim=-1)

        config = LoraConfig(
            r=8,
            init_lora_weights=False,
        )
        model = get_peft_model(model, config)

        with torch.inference_mode():
            out_before = F.softmax(model(random_input).logits, dim=-1)

        model.merge_adapter()
        with model.disable_adapter():
            with torch.inference_mode():
                out_after = F.softmax(model(random_input).logits, dim=-1)

        atol = 0.01
        rtol = 10
        self.assertFalse(torch.allclose(out_base, out_before, atol=atol, rtol=rtol))
        self.assertTrue(torch.allclose(out_base, out_after, atol=atol, rtol=rtol))
        self.assertTrue(isinstance(model, PeftModel))
        self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.q_proj, LoraLinear4bit))
        self.assertTrue(isinstance(model.base_model.model.model.decoder.layers[0].self_attn.v_proj, LoraLinear4bit))

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    def test_serialization_shared_tensors(self):
        model_checkpoint = "roberta-base"
        peft_config = LoraConfig(
            task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
        )
        model = AutoModelForTokenClassification.from_pretrained(model_checkpoint, num_labels=11).to("cuda")
        model = get_peft_model(model, peft_config)

        with tempfile.TemporaryDirectory() as tmp_dirname:
            model.save_pretrained(tmp_dirname, safe_serialization=True)
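
The merge tests above all follow one pattern: capture output probabilities with the adapter active, merge it into the base weights, and check the outputs are unchanged. A minimal CPU-only sketch of that pattern (editor's illustration, not a test from this file; the tiny model id is a placeholder and the tolerances are indicative for fp32):

import torch
import torch.nn.functional as F
from transformers import AutoModelForCausalLM

from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]])

# init_lora_weights=False gives a random (non-identity) adapter, so it actually changes outputs
config = LoraConfig(r=8, init_lora_weights=False)
model = get_peft_model(model, config)

with torch.inference_mode():
    out_before = F.softmax(model(random_input).logits, dim=-1)

# merge_and_unload folds the adapter into the base weights and returns the plain base model
model = model.merge_and_unload()
with torch.inference_mode():
    out_after = F.softmax(model(random_input).logits, dim=-1)

# merging must not change the forward pass; compare in probability space as the tests above do
assert torch.allclose(out_before, out_after, atol=1e-4, rtol=1e-4)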
hf_public_repos/peft/tests/test_config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest import warnings import pytest from parameterized import parameterized from peft import ( AdaLoraConfig, AdaptionPromptConfig, IA3Config, LoHaConfig, LoraConfig, MultitaskPromptTuningConfig, OFTConfig, PeftConfig, PrefixTuningConfig, PromptEncoder, PromptEncoderConfig, PromptTuningConfig, ) PEFT_MODELS_TO_TEST = [("lewtun/tiny-random-OPTForCausalLM-delta", "v1")] ALL_CONFIG_CLASSES = ( AdaptionPromptConfig, AdaLoraConfig, IA3Config, LoHaConfig, LoraConfig, MultitaskPromptTuningConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, OFTConfig, ) class PeftConfigTester(unittest.TestCase): @parameterized.expand(ALL_CONFIG_CLASSES) def test_methods(self, config_class): r""" Test if all configs have the expected methods. Here we test - to_dict - save_pretrained - from_pretrained - from_json_file """ # test if all configs have the expected methods config = config_class() self.assertTrue(hasattr(config, "to_dict")) self.assertTrue(hasattr(config, "save_pretrained")) self.assertTrue(hasattr(config, "from_pretrained")) self.assertTrue(hasattr(config, "from_json_file")) @parameterized.expand(ALL_CONFIG_CLASSES) def test_task_type(self, config_class): config_class(task_type="test") @parameterized.expand(ALL_CONFIG_CLASSES) def test_from_pretrained(self, config_class): r""" Test if the config is correctly loaded using: - from_pretrained """ for model_name, revision in PEFT_MODELS_TO_TEST: # Test we can load config from delta config_class.from_pretrained(model_name, revision=revision) @parameterized.expand(ALL_CONFIG_CLASSES) def test_save_pretrained(self, config_class): r""" Test if the config is correctly saved and loaded using - save_pretrained """ config = config_class() with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) self.assertEqual(config.to_dict(), config_from_pretrained.to_dict()) @parameterized.expand(ALL_CONFIG_CLASSES) def test_from_json_file(self, config_class): config = config_class() with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_json = config_class.from_json_file(os.path.join(tmp_dirname, "adapter_config.json")) self.assertEqual(config.to_dict(), config_from_json) @parameterized.expand(ALL_CONFIG_CLASSES) def test_to_dict(self, config_class): r""" Test if the config can be correctly converted to a dict using: - to_dict """ config = config_class() self.assertTrue(isinstance(config.to_dict(), dict)) @parameterized.expand(ALL_CONFIG_CLASSES) def test_from_pretrained_cache_dir(self, config_class): r""" Test if the config is correctly loaded with extra kwargs """ with tempfile.TemporaryDirectory() as tmp_dirname: for model_name, revision in PEFT_MODELS_TO_TEST: # Test we can load config from delta config_class.from_pretrained(model_name, revision=revision, 
cache_dir=tmp_dirname) def test_from_pretrained_cache_dir_remote(self): r""" Test if the config is correctly loaded with a checkpoint from the hub """ with tempfile.TemporaryDirectory() as tmp_dirname: PeftConfig.from_pretrained("ybelkada/test-st-lora", cache_dir=tmp_dirname) self.assertTrue("models--ybelkada--test-st-lora" in os.listdir(tmp_dirname)) @parameterized.expand(ALL_CONFIG_CLASSES) def test_set_attributes(self, config_class): # manually set attributes and check if they are correctly written config = config_class(peft_type="test") # save pretrained with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) self.assertEqual(config.to_dict(), config_from_pretrained.to_dict()) @parameterized.expand(ALL_CONFIG_CLASSES) def test_config_copy(self, config_class): # see https://github.com/huggingface/peft/issues/424 config = config_class() copied = copy.copy(config) self.assertEqual(config.to_dict(), copied.to_dict()) @parameterized.expand(ALL_CONFIG_CLASSES) def test_config_deepcopy(self, config_class): # see https://github.com/huggingface/peft/issues/424 config = config_class() copied = copy.deepcopy(config) self.assertEqual(config.to_dict(), copied.to_dict()) @parameterized.expand(ALL_CONFIG_CLASSES) def test_config_pickle_roundtrip(self, config_class): # see https://github.com/huggingface/peft/issues/424 config = config_class() copied = pickle.loads(pickle.dumps(config)) self.assertEqual(config.to_dict(), copied.to_dict()) def test_prompt_encoder_warning_num_layers(self): # This test checks that if a prompt encoder config is created with an argument that is ignored, there should be a # warning. However, there should be no warning if the default value is used. kwargs = { "num_virtual_tokens": 20, "num_transformer_submodules": 1, "token_dim": 768, "encoder_hidden_size": 768, } # there should be no warning with just the default argument for encoder_num_layers config = PromptEncoderConfig(**kwargs) with warnings.catch_warnings(): PromptEncoder(config) # when changing encoder_num_layers, there should be a warning for MLP since that value is not used config = PromptEncoderConfig(encoder_num_layers=123, **kwargs) with pytest.warns(UserWarning) as record: PromptEncoder(config) expected_msg = "for MLP, the argument `encoder_num_layers` is ignored. Exactly 2 MLP layers are used."
assert str(record.list[0].message) == expected_msg @parameterized.expand([LoHaConfig, LoraConfig, IA3Config, OFTConfig]) def test_save_pretrained_with_target_modules(self, config_class): # See #1041, #1045 config = config_class(target_modules=["a", "list"]) with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) self.assertEqual(config.to_dict(), config_from_pretrained.to_dict()) # explicit test that target_modules should be converted to set self.assertTrue(isinstance(config_from_pretrained.target_modules, set)) def test_regex_with_layer_indexing_lora(self): # This test checks that an error is raised if `target_modules` is a regex expression and `layers_to_transform` or # `layers_pattern` are not None invalid_config1 = {"target_modules": ".*foo", "layers_to_transform": [0]} invalid_config2 = {"target_modules": ".*foo", "layers_pattern": ["bar"]} valid_config = {"target_modules": ["foo"], "layers_pattern": ["bar"], "layers_to_transform": [0]} with self.assertRaisesRegex( ValueError, expected_regex="`layers_to_transform` cannot be used when `target_modules` is a str.", ): LoraConfig(**invalid_config1) with self.assertRaisesRegex( ValueError, expected_regex="`layers_pattern` cannot be used when `target_modules` is a str." ): LoraConfig(**invalid_config2) # should run without errors LoraConfig(**valid_config) def test_ia3_is_feedforward_subset_invalid_config(self): # This test checks that the IA3 config raises a value error if the feedforward_modules argument # is not a subset of the target_modules argument # an example invalid config invalid_config = {"target_modules": ["k", "v"], "feedforward_modules": ["q"]} with self.assertRaisesRegex( ValueError, expected_regex="^`feedforward_modules` should be a subset of `target_modules`$" ): IA3Config(**invalid_config) def test_ia3_is_feedforward_subset_valid_config(self): # This test checks that the IA3 config is created without errors with valid arguments. # feedforward_modules should be a subset of target_modules if both are lists # an example valid config with regex expressions. valid_config_regex_exp = { "target_modules": ".*.(SelfAttention|EncDecAttention|DenseReluDense).*(q|v|wo)$", "feedforward_modules": ".*.DenseReluDense.wo$", } # an example valid config with module lists. valid_config_list = {"target_modules": ["k", "v", "wo"], "feedforward_modules": ["wo"]} # should run without errors IA3Config(**valid_config_regex_exp) IA3Config(**valid_config_list)
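For reference, the save/load round trip that most of these config tests exercise reduces to a few lines; a minimal sketch (the hyperparameters and module names here are arbitrary):

import tempfile
from peft import LoraConfig

config = LoraConfig(r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"])
with tempfile.TemporaryDirectory() as tmp_dir:
    config.save_pretrained(tmp_dir)  # writes adapter_config.json into tmp_dir
    reloaded = LoraConfig.from_pretrained(tmp_dir)
# the round trip is lossless; note that target_modules comes back as a set
assert config.to_dict() == reloaded.to_dict()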
hf_public_repos/peft/tests/test_feature_extraction_models.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from parameterized import parameterized from transformers import AutoModel from peft import PrefixTuningConfig, PromptLearningConfig from .testing_common import PeftCommonTester, PeftTestConfigManager PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST = [ "hf-internal-testing/tiny-random-BertModel", "hf-internal-testing/tiny-random-RobertaModel", "hf-internal-testing/tiny-random-DebertaModel", "hf-internal-testing/tiny-random-DebertaV2Model", ] FULL_GRID = { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "task_type": "FEATURE_EXTRACTION", } def skip_non_prompt_tuning(test_list): """Skip tests that are not prompt tuning""" return [ test for test in test_list if issubclass(test[2], PromptLearningConfig) and (test[2] != PrefixTuningConfig) ] def skip_deberta_lora_tests(test_list): r""" Skip gradient checkpointing tests with lora/ia3 for Deberta models (couldn't find much info on the error) """ return [test for test in test_list if not (any(k in test[0] for k in ["lora", "ia3"]) and "Deberta" in test[0])] def skip_deberta_pt_tests(test_list): r""" Skip prefix tuning tests for Deberta models (couldn't find much info on the error) """ return [test for test in test_list if not ("prefix_tuning" in test[0] and "Deberta" in test[0])] class PeftFeatureExtractionModelTester(unittest.TestCase, PeftCommonTester): r""" Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods We use parameterized.expand for debugging purposes to test each model individually.
""" transformers_class = AutoModel def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return input_dict @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_pt_tests) ) def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_lora_tests) ) def test_training_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): 
self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_unload_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_prompt_tuning) ) def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
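The setup these parametrized tests run many times over boils down to the following; a minimal sketch using one of the tiny checkpoints listed above (the explicit target_modules are BERT's attention projections, chosen here for illustration):

import torch
from transformers import AutoModel
from peft import LoraConfig, get_peft_model

model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-BertModel")
config = LoraConfig(task_type="FEATURE_EXTRACTION", r=8, lora_alpha=32, target_modules=["query", "value"])
model = get_peft_model(model, config)
model.print_trainable_parameters()  # only the LoRA parameters are trainable

inputs = {
    "input_ids": torch.tensor([[1, 1, 1], [1, 2, 1]]),
    "attention_mask": torch.tensor([[1, 1, 1], [1, 0, 1]]),
}
features = model(**inputs).last_hidden_state  # hidden states from the adapted model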
hf_public_repos/peft/tests/test_stablediffusion.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import asdict, replace from unittest import TestCase import numpy as np from diffusers import StableDiffusionPipeline from parameterized import parameterized from peft import LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, get_peft_model from .testing_common import ClassInstantier, PeftCommonTester from .testing_utils import temp_seed PEFT_DIFFUSERS_SD_MODELS_TO_TEST = ["hf-internal-testing/tiny-stable-diffusion-torch"] CONFIG_TESTING_KWARGS = ( { "text_encoder": { "r": 8, "lora_alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "lora_dropout": 0.0, "bias": "none", }, "unet": { "r": 8, "lora_alpha": 32, "target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"], "lora_dropout": 0.0, "bias": "none", }, }, { "text_encoder": { "r": 8, "alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "rank_dropout": 0.0, "module_dropout": 0.0, }, "unet": { "r": 8, "alpha": 32, "target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"], "rank_dropout": 0.0, "module_dropout": 0.0, }, }, { "text_encoder": { "r": 8, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "module_dropout": 0.0, }, "unet": { "r": 8, "target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"], "module_dropout": 0.0, }, }, ) CLASSES_MAPPING = { "lora": (LoraConfig, CONFIG_TESTING_KWARGS[0]), "loha": (LoHaConfig, CONFIG_TESTING_KWARGS[1]), "lokr": (LoKrConfig, CONFIG_TESTING_KWARGS[1]), "oft": (OFTConfig, CONFIG_TESTING_KWARGS[2]), } PeftStableDiffusionTestConfigManager = ClassInstantier(CLASSES_MAPPING) class StableDiffusionModelTester(TestCase, PeftCommonTester): r""" Tests that diffusers StableDiffusion model works with PEFT as expected.
""" transformers_class = StableDiffusionPipeline def instantiate_sd_peft(self, model_id, config_cls, config_kwargs): # Instantiate StableDiffusionPipeline model = self.transformers_class.from_pretrained(model_id) config_kwargs = config_kwargs.copy() text_encoder_kwargs = config_kwargs.pop("text_encoder") unet_kwargs = config_kwargs.pop("unet") # the remaining config kwargs should be applied to both configs for key, val in config_kwargs.items(): text_encoder_kwargs[key] = val unet_kwargs[key] = val # Instantiate text_encoder adapter config_text_encoder = config_cls(**text_encoder_kwargs) model.text_encoder = get_peft_model(model.text_encoder, config_text_encoder) # Instantiate unet adapter config_unet = config_cls(**unet_kwargs) model.unet = get_peft_model(model.unet, config_unet) # Move model to device model = model.to(self.torch_device) return model def prepare_inputs_for_testing(self): return { "prompt": "a high quality digital photo of a cute corgi", "num_inference_steps": 20, } @parameterized.expand( PeftStableDiffusionTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "loha_kwargs": {"init_weights": [False]}, "oft_kwargs": {"init_weights": [False]}, }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): # Instantiate model & adapters model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Generate output for peft modified StableDiffusion dummy_input = self.prepare_inputs_for_testing() with temp_seed(seed=42): peft_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Merge adapter and model if config_cls not in [LoHaConfig, OFTConfig]: # TODO: Merging the text_encoder is leading to issues on CPU with PyTorch 2.1 model.text_encoder = model.text_encoder.merge_and_unload() model.unet = model.unet.merge_and_unload() # Generate output for peft merged StableDiffusion with temp_seed(seed=42): merged_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Images are in uint8 drange, so use large atol self.assertTrue(np.allclose(peft_output, merged_output, atol=1.0)) @parameterized.expand( PeftStableDiffusionTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, }, filter_params_func=lambda tests: [x for x in tests if all(s not in x[0] for s in ["loha", "lokr", "oft"])], ) ) def test_add_weighted_adapter_base_unchanged(self, test_name, model_id, config_cls, config_kwargs): # Instantiate model & adapters model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Get current available adapter config text_encoder_adapter_name = next(iter(model.text_encoder.peft_config.keys())) unet_adapter_name = next(iter(model.unet.peft_config.keys())) text_encoder_adapter_config = replace(model.text_encoder.peft_config[text_encoder_adapter_name]) unet_adapter_config = replace(model.unet.peft_config[unet_adapter_name]) # Create weighted adapters model.text_encoder.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test") model.unet.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test") # Assert that base adapters config did not change self.assertTrue( asdict(text_encoder_adapter_config) == asdict(model.text_encoder.peft_config[text_encoder_adapter_name]) ) self.assertTrue(asdict(unet_adapter_config) == asdict(model.unet.peft_config[unet_adapter_name])) @parameterized.expand( 
PeftStableDiffusionTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "loha_kwargs": {"init_weights": [False]}, "lokr_kwargs": {"init_weights": [False]}, "oft_kwargs": {"init_weights": [False]}, }, ) ) def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_disable_adapter(model_id, config_cls, config_kwargs)
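The instantiate_sd_peft helper above mirrors the usual user flow with diffusers: adapt the text encoder and the unet with separate configs. A minimal sketch with the same tiny checkpoint; the target module lists are shortened here for brevity:

from diffusers import StableDiffusionPipeline
from peft import LoraConfig, get_peft_model

pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")

# one adapter per sub-model, as in instantiate_sd_peft
text_config = LoraConfig(r=8, lora_alpha=32, target_modules=["k_proj", "q_proj", "v_proj", "out_proj"])
unet_config = LoraConfig(r=8, lora_alpha=32, target_modules=["to_k", "to_q", "to_v", "to_out.0"])
pipe.text_encoder = get_peft_model(pipe.text_encoder, text_config)
pipe.unet = get_peft_model(pipe.unet, unet_config)

image = pipe("a high quality digital photo of a cute corgi", num_inference_steps=2).images[0]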
hf_public_repos/peft/tests/test_auto.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from peft import ( AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForFeatureExtraction, AutoPeftModelForQuestionAnswering, AutoPeftModelForSeq2SeqLM, AutoPeftModelForSequenceClassification, AutoPeftModelForTokenClassification, PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) class PeftAutoModelTester(unittest.TestCase): def test_peft_causal_lm(self): model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora" model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) # check if kwargs are passed correctly model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForCausalLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_seq2seq_lm(self): model_id = "peft-internal-testing/tiny_T5ForSeq2SeqLM-lora" model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) # check if kwargs are passed correctly model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_sequence_cls(self): model_id = "peft-internal-testing/tiny_OPTForSequenceClassification-lora" model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) # check if kwargs are passed correctly model = AutoPeftModelForSequenceClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) 
self.assertTrue(model.score.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSequenceClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_token_classification(self): model_id = "peft-internal-testing/tiny_GPT2ForTokenClassification-lora" model = AutoPeftModelForTokenClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForTokenClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) # check if kwargs are passed correctly model = AutoPeftModelForTokenClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) self.assertTrue(model.base_model.classifier.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForTokenClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_question_answering(self): model_id = "peft-internal-testing/tiny_OPTForQuestionAnswering-lora" model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) # check if kwargs are passed correctly model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) self.assertTrue(model.base_model.qa_outputs.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForQuestionAnswering.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_feature_extraction(self): model_id = "peft-internal-testing/tiny_OPTForFeatureExtraction-lora" model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) # check if kwargs are passed correctly model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) self.assertTrue(model.base_model.model.decoder.embed_tokens.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForFeatureExtraction.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_whisper(self): model_id = "peft-internal-testing/tiny_WhisperForConditionalGeneration-lora" model = AutoPeftModel.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModel)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModel.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModel)) # check if kwargs are passed correctly model = 
AutoPeftModel.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModel)) self.assertTrue(model.base_model.model.model.encoder.embed_positions.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModel.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16)
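In effect, the AutoPeftModel classes collapse the usual two-step load (base model first, then PeftModel.from_pretrained) into a single call; a minimal sketch with one of the checkpoints used above:

import torch
from peft import AutoPeftModelForCausalLM

# reads adapter_config.json, loads the base model it names, then attaches the adapter
model = AutoPeftModelForCausalLM.from_pretrained(
    "peft-internal-testing/tiny-OPTForCausalLM-lora",
    torch_dtype=torch.bfloat16,  # extra kwargs are forwarded to the base model load
)
model.eval()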
hf_public_repos/peft/tests/test_custom_models.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import torch from parameterized import parameterized from torch import nn from transformers.pytorch_utils import Conv1D from peft import AdaLoraConfig, IA3Config, LoHaConfig, LoKrConfig, LoraConfig, OFTConfig, PeftModel, get_peft_model from peft.tuners.tuners_utils import BaseTunerLayer from .testing_common import PeftCommonTester from .testing_utils import get_state_dict # MLP is a vanilla FF network with only linear layers # EmbConv1D has an embedding and a Conv1D layer # Conv2D has a Conv2D layer TEST_CASES = [ ######## # LoRA # ######## ("Vanilla MLP 1 LoRA", "MLP", LoraConfig, {"target_modules": "lin0"}), ("Vanilla MLP 2 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0"]}), ("Vanilla MLP 3 LoRA", "MLP", LoraConfig, {"target_modules": ["lin1"]}), ("Vanilla MLP 4 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0", "lin1"]}), ("Vanilla MLP 5 LoRA", "MLP", LoraConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}), ( "Vanilla MLP 6 LoRA", "MLP", LoraConfig, { "target_modules": ["lin0"], "lora_alpha": 4, "lora_dropout": 0.1, }, ), ("Embedding + transformers Conv1D 1 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["conv1d"]}), ("Embedding + transformers Conv1D 2 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb"]}), ("Embedding + transformers Conv1D 3 LoRA", "EmbConv1D", LoraConfig, {"target_modules": ["emb", "conv1d"]}), ("Conv2d 1 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d"]}), ("Conv2d 2 LoRA", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"]}), ####### # IA³ # ####### ("Vanilla MLP 1 IA3", "MLP", IA3Config, {"target_modules": "lin0", "feedforward_modules": []}), ("Vanilla MLP 2 IA3", "MLP", IA3Config, {"target_modules": "lin0", "feedforward_modules": "lin0"}), ("Vanilla MLP 3 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "feedforward_modules": []}), ("Vanilla MLP 4 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "feedforward_modules": ["lin0"]}), ("Vanilla MLP 5 IA3", "MLP", IA3Config, {"target_modules": ["lin1"], "feedforward_modules": []}), ("Vanilla MLP 6 IA3", "MLP", IA3Config, {"target_modules": ["lin1"], "feedforward_modules": ["lin1"]}), ( "Vanilla MLP 7 IA3", "MLP", IA3Config, {"target_modules": ["lin0", "lin1"], "feedforward_modules": []}, ), ( "Vanilla MLP 8 IA3", "MLP", IA3Config, {"target_modules": ["lin0", "lin1"], "feedforward_modules": ["lin0", "lin1"]}, ), ( "Vanilla MLP 9 IA3", "MLP", IA3Config, {"target_modules": ["lin0"], "modules_to_save": ["lin1"], "feedforward_modules": ["lin0"]}, ), ( "transformers Conv1D 1 IA3", "EmbConv1D", IA3Config, {"target_modules": ["conv1d"], "feedforward_modules": ["conv1d"]}, ), ( "transformers Conv1D 2 IA3", "EmbConv1D", IA3Config, {"target_modules": ["conv1d", "lin0"], "feedforward_modules": ["conv1d", "lin0"]}, ), ( "transformers Conv1D 1 IA3", "EmbConv1D", IA3Config, {"target_modules": ["conv1d"], 
"feedforward_modules": ["conv1d"], "modules_to_save": ["lin1"]}, ), ("Conv2d 1 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d"], "feedforward_modules": []}), ("Conv2d 2 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d"], "feedforward_modules": ["conv2d"]}), ( "Conv2d 3 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d", "lin0"], "feedforward_modules": []}, ), ( "Conv2d 4 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d", "lin0"], "feedforward_modules": ["conv2d"]}, ), ( "Conv2d 5 IA3", "Conv2d", IA3Config, {"target_modules": ["conv2d", "lin0"], "feedforward_modules": ["conv2d", "lin0"]}, ), ######## # LoHa # ######## ("Vanilla MLP 1 LOHA", "MLP", LoHaConfig, {"target_modules": "lin0"}), ("Vanilla MLP 2 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0"]}), ("Vanilla MLP 3 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin1"]}), ("Vanilla MLP 4 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0", "lin1"]}), ("Vanilla MLP 5 LOHA", "MLP", LoHaConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}), ( "Vanilla MLP 6 LOHA", "MLP", LoHaConfig, { "target_modules": ["lin0"], "alpha": 4, "module_dropout": 0.1, }, ), ("Vanilla MLP 7 LOHA", "MLP", LoHaConfig, {"target_modules": "lin0", "rank_dropout": 0.5}), ("Conv2d 1 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d"]}), ("Conv2d 2 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d", "lin0"]}), ("Conv2d 3 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d"], "use_effective_conv2d": True}), ("Conv2d 4 LOHA", "Conv2d", LoHaConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True}), # LoKr ("Vanilla MLP 1 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0"}), ("Vanilla MLP 2 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0"]}), ("Vanilla MLP 3 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin1"]}), ("Vanilla MLP 4 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0", "lin1"]}), ("Vanilla MLP 5 LOKR", "MLP", LoKrConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}), ( "Vanilla MLP 6 LOKR", "MLP", LoKrConfig, { "target_modules": ["lin0"], "alpha": 4, "module_dropout": 0.1, }, ), ("Vanilla MLP 7 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0", "rank_dropout": 0.5}), ("Vanilla MLP 8 LOKR", "MLP", LoKrConfig, {"target_modules": "lin0", "decompose_both": True, "r": 1, "alpha": 1}), ("Conv2d 1 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d"]}), ("Conv2d 2 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"]}), ("Conv2d 3 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d"], "use_effective_conv2d": True}), ("Conv2d 4 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True}), ( "Conv2d 5 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_both": True}, ), ( "Conv2d 6 LOKR", "Conv2d", LoKrConfig, {"target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_factor": 4}, ), ( "Conv2d 7 LOKR", "Conv2d", LoKrConfig, { "target_modules": ["conv2d", "lin0"], "use_effective_conv2d": True, "decompose_both": True, "decompose_factor": 4, }, ), ######## # OFT # ######## ("Vanilla MLP 1 OFT", "MLP", OFTConfig, {"target_modules": "lin0"}), ("Vanilla MLP 2 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"]}), ("Vanilla MLP 5 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}), ( "Vanilla MLP 6 OFT", "MLP", OFTConfig, { "target_modules": ["lin0"], "module_dropout": 0.1, }, ), ("Vanilla MLP 7 OFT", "MLP", 
OFTConfig, {"target_modules": ["lin0"], "coft": True}), ("Vanilla MLP 8 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "block_share": True}), ("Vanilla MLP 9 OFT", "MLP", OFTConfig, {"target_modules": ["lin0"], "coft": True, "block_share": True}), ("Conv2d 1 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"]}), ("Conv2d 3 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "coft": True}), ("Conv2d 4 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "block_share": True}), ("Conv2d 5 OFT", "Conv2d", OFTConfig, {"target_modules": ["conv2d"], "coft": True, "block_share": True}), ] MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES = [ ( "LoRA Same", "lora", LoraConfig, {"target_modules": ["lin0"], "init_lora_weights": False}, {"target_modules": ["lin0"], "init_lora_weights": False}, ), ( "LoRA Different", "lora", LoraConfig, {"target_modules": ["lin0"], "init_lora_weights": False}, {"target_modules": ["lin1"], "init_lora_weights": False}, ), ( "IA3 Same", "ia3", IA3Config, { "target_modules": ["lin0"], "feedforward_modules": ["lin0"], "init_ia3_weights": False, }, { "target_modules": ["lin0"], "feedforward_modules": ["lin0"], "init_ia3_weights": False, }, ), ( "IA3 Different", "ia3", IA3Config, { "target_modules": ["lin0"], "feedforward_modules": ["lin0"], "init_ia3_weights": False, }, { "target_modules": ["lin1"], "feedforward_modules": ["lin1"], "init_ia3_weights": False, }, ), ( "AdaLora Same", "adalora", AdaLoraConfig, {"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True}, {"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True}, ), ( "AdaLora Different", "adalora", AdaLoraConfig, {"target_modules": ["lin0"], "init_lora_weights": False, "inference_mode": True}, {"target_modules": ["lin1"], "init_lora_weights": False, "inference_mode": True}, ), ] PREFIXES = { IA3Config: "ia3_", LoraConfig: "lora_", LoHaConfig: "hada_", LoKrConfig: "lokr_", OFTConfig: "oft_", } class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.drop(X) X = self.lin1(X) X = self.sm(X) return X class Block(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 10, bias=bias) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.drop(X) X = self.lin1(X) return X class DeepMLP(nn.Module): def __init__(self, bias=True, num_hidden_layers=12): super().__init__() self.layers = nn.ModuleList([Block(bias=bias) for _ in range(num_hidden_layers)]) self.out = nn.Linear(10, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float(X) for layer in self.layers: X = layer(X) X = self.out(X) X = self.sm(X) return X class ModelEmbConv1D(nn.Module): def __init__(self): super().__init__() self.emb = nn.Embedding(100, 5) self.conv1d = Conv1D(1, 5) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = self.emb(X) X = self.conv1d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X class ModelEmbWithEmbeddingUtils(nn.Module): # Adds `get_input_embeddings` and `get_output_embeddings` methods to mimic 🤗 transformers models def __init__(self): 
super().__init__() self.embed_tokens = nn.Embedding(100, 5) self.conv1d = Conv1D(1, 5) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = self.embed_tokens(X) X = self.conv1d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X def get_input_embeddings(self): return self.embed_tokens def get_output_embeddings(self): return None class ModelConv2D(nn.Module): def __init__(self): super().__init__() self.conv2d = nn.Conv2d(5, 10, 3) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float().reshape(2, 5, 3, 3) X = self.conv2d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X class MockTransformerWrapper: """Mock class to behave like a transformers model. This is needed because the tests initialize the model by calling transformers_class.from_pretrained. """ @classmethod def from_pretrained(cls, model_id, torch_dtype=None): # set the seed so that from_pretrained always returns the same model torch.manual_seed(0) if torch_dtype is None: torch_dtype = torch.float32 if model_id == "MLP": return MLP().to(torch_dtype) if model_id == "EmbConv1D": return ModelEmbConv1D().to(torch_dtype) if model_id == "Conv2d": return ModelConv2D().to(torch_dtype) raise ValueError(f"model_id {model_id} not implemented") class PeftCustomModelTester(unittest.TestCase, PeftCommonTester): """TODO""" transformers_class = MockTransformerWrapper def prepare_inputs_for_testing(self): X = torch.arange(90).view(9, 10).to(self.torch_device) return {"X": X} @parameterized.expand(TEST_CASES) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): # This test does not work with custom models because it assumes that # there is always a method get_input_embeddings that returns a layer # which does not need updates. Instead, a new test is added below that # checks that LoRA works as expected. 
pass @parameterized.expand(TEST_CASES) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False) @parameterized.expand(TEST_CASES) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): config_kwargs = config_kwargs.copy() if issubclass(config_cls, LoraConfig): config_kwargs["init_lora_weights"] = False elif issubclass(config_cls, IA3Config): config_kwargs["init_ia3_weights"] = False self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs): config_kwargs = config_kwargs.copy() if issubclass(config_cls, LoraConfig): config_kwargs["init_lora_weights"] = False elif issubclass(config_cls, IA3Config): config_kwargs["init_ia3_weights"] = False self._test_merge_layers_fp16(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_generate(self, test_name, model_id, config_cls, config_kwargs): # Custom models do not (necessarily) have a generate method, so this test is not performed pass @parameterized.expand(TEST_CASES) def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs): # Custom models do not (necessarily) have a generate method, so this test is not performed pass @parameterized.expand(TEST_CASES) def test_training_custom_models(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_training_custom_models_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): # At the moment, layer indexing only works when layer names conform to a specific pattern, which is not # guaranteed here. Therefore, this test is not performed. 
pass @parameterized.expand(TEST_CASES) def test_training_custom_models_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_forward_output_finite(self, test_name, model_id, config_cls, config_kwargs): X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model.eval() with torch.no_grad(): output = model(**X) self.assertTrue(torch.isfinite(output).all()) @parameterized.expand(TEST_CASES) def test_only_params_are_updated(self, test_name, model_id, config_cls, config_kwargs): # An explicit test that when using LoRA on a custom model, only the LoRA parameters are updated during training X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model_before = copy.deepcopy(model) model.train() optimizer = torch.optim.SGD(model.parameters(), lr=0.5) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) loss = y_pred.sum() loss.backward() optimizer.step() tol = 1e-4 params_before = dict(model_before.named_parameters()) params_after = dict(model.named_parameters()) self.assertEqual(params_before.keys(), params_after.keys()) prefix = PREFIXES[config_cls] for name, param_before in params_before.items(): param_after = params_after[name] if (prefix in name) or ("modules_to_save" in name): # target_modules and modules_to_save _are_ updated self.assertFalse(torch.allclose(param_before, param_after, atol=tol, rtol=tol)) else: self.assertTrue(torch.allclose(param_before, param_after, atol=tol, rtol=tol)) @parameterized.expand(TEST_CASES) def test_parameters_after_loading_model(self, test_name, model_id, config_cls, config_kwargs): # An explicit test that when loading a trained model, the parameters are loaded correctly # see issue #808 X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model.train() optimizer = torch.optim.SGD(model.parameters(), lr=0.5) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) loss = y_pred.sum() loss.backward() optimizer.step() tol = 1e-4 params_before = get_state_dict(model) # note: no need to sanity check if parameters were updated at all, this # is already covered in the previous test with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) 
model_from_pretrained = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) params_after = get_state_dict(model_from_pretrained) self.assertEqual(params_before.keys(), params_after.keys()) for name, param_before in params_before.items(): param_after = params_after[name] self.assertTrue(torch.allclose(param_before, param_after, atol=tol, rtol=tol)) @parameterized.expand(TEST_CASES) def test_disable_adapters(self, test_name, model_id, config_cls, config_kwargs): X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device).eval() outputs_base = model(**X) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model.eval() outputs_before = model(**X) self.assertTrue(torch.allclose(outputs_base, outputs_before)) model.train() # EmbConv1D is slow to learn for some reason lr = 0.01 if model_id != "EmbConv1D" else 1.0 optimizer = torch.optim.SGD(model.parameters(), lr=lr) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) y = torch.arange(len(y_pred)).to(self.torch_device) % 2 loss = nn.functional.nll_loss(y_pred, y) loss.backward() optimizer.step() model.eval() outputs_after = model(**X) with model.disable_adapter(): outputs_disabled = model(**X) # check that after leaving the disable_adapter context, everything is enabled again outputs_enabled_after_disable = model(**X) self.assertFalse(torch.allclose(outputs_before, outputs_after)) self.assertTrue(torch.allclose(outputs_before, outputs_disabled)) self.assertTrue(torch.allclose(outputs_after, outputs_enabled_after_disable)) @parameterized.expand(TEST_CASES) def test_disable_adapters_with_merging(self, test_name, model_id, config_cls, config_kwargs): # same as test_disable_adapters, but with merging X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model.eval() outputs_before = model(**X) model.train() lr = 0.01 # Adam optimizer since SGD isn't great for small models with IA3 + Conv1D optimizer = torch.optim.Adam(model.parameters(), lr=lr) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) y = torch.arange(len(y_pred)).to(self.torch_device) % 2 loss = nn.functional.nll_loss(y_pred, y) loss.backward() optimizer.step() model.eval() model.merge_adapter() outputs_after = model(**X) with model.disable_adapter(): outputs_disabled = model(**X) # check that after leaving the disable_adapter context, everything is enabled again outputs_enabled_after_disable = model(**X) atol, rtol = 1e-5, 1e-5 # tolerances higher than defaults since merging introduces some numerical instability if issubclass(config_cls, IA3Config) and model_id == "Conv2d": # more instability with Conv2d + IA3 atol, rtol = 1e-3, 1e-3 # check that there is a difference in results after training self.assertFalse(torch.allclose(outputs_before, outputs_after, atol=atol, rtol=rtol)) # check that disabling adapters gives the same results as 
before training self.assertTrue(torch.allclose(outputs_before, outputs_disabled, atol=atol, rtol=rtol)) # check that enabling + disabling adapters does not change the results self.assertTrue(torch.allclose(outputs_after, outputs_enabled_after_disable, atol=atol, rtol=rtol)) @parameterized.expand(TEST_CASES) def test_disable_adapter_with_bias_warns(self, test_name, model_id, config_cls, config_kwargs): # When training biases in lora, disabling adapters does not reset the biases, so the output is not what users # might expect. Therefore, a warning should be given. # Note: We test only with custom models since they run really fast. There is really no point in testing the same # thing with decoder, encoder_decoder, etc. if config_cls != LoraConfig: # skip this test for other configs, as the bias argument is specific to LoRA self.skipTest("Testing bias warnings only for LoraConfig") def run_with_disable(config_kwargs, bias): config_kwargs = config_kwargs.copy() config_kwargs["bias"] = bias model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) peft_model = get_peft_model(model, config) with peft_model.disable_adapter(): pass # there is nothing to be done # check that bias=all and bias=lora_only give a warning with the correct message msg_start = "Careful, disabling adapter layers with bias configured to be" with self.assertWarns(UserWarning, msg=msg_start): run_with_disable(config_kwargs, bias="lora_only") with self.assertWarns(UserWarning, msg=msg_start): run_with_disable(config_kwargs, bias="all") # For bias=none, there is no warning. Unfortunately, AFAIK unittest has no option to assert that no warning is # given, therefore, we check that the unittest gives us an AssertionError if we check for a warning bias_warning_was_given = False try: with self.assertWarns(UserWarning) as cm: run_with_disable(config_kwargs, bias="none") # if we get here, it means there was no AssertionError, i.e. there are warnings -- let's check that they # are not related to the bias setting if any(warning.message.args[0].startswith(msg_start) for warning in cm.warnings): bias_warning_was_given = True except AssertionError: # This is good, there was an AssertionError, i.e. there was no warning pass if bias_warning_was_given: # This is bad, there was a warning about the bias when there should not have been any.
self.fail("There should be no warning when bias is set to 'none'") @parameterized.expand(TEST_CASES) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs): self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs) def test_existing_model_card(self): # ensure that if there is already a model card, it is not overwritten model = MLP() config = LoraConfig(target_modules=["lin0"]) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dirname: # create a model card text = "---\nmeta: hello\n---\nThis is a model card\n" with open(os.path.join(tmp_dirname, "README.md"), "w") as f: f.write(text) model.save_pretrained(tmp_dirname) with open(os.path.join(tmp_dirname, "README.md"), "r") as f: model_card = f.read() self.assertIn("library_name: peft", model_card) self.assertIn("meta: hello", model_card) self.assertIn("This is a model card", model_card) def test_non_existing_model_card(self): # ensure that if there is already a model card, it is not overwritten model = MLP() config = LoraConfig(target_modules=["lin0"]) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) with open(os.path.join(tmp_dirname, "README.md"), "r") as f: model_card = f.read() self.assertIn("library_name: peft", model_card) # rough check that the model card is pre-filled self.assertGreater(len(model_card), 1000) @parameterized.expand(["auto", True, False]) def test_targeting_lora_to_embedding_layer(self, save_embedding_layers): model = ModelEmbWithEmbeddingUtils() config = LoraConfig(target_modules=["embed_tokens", "lin0"], init_lora_weights=False) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dirname: if save_embedding_layers == "auto": # assert warning msg_start = "Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`." with self.assertWarns(UserWarning, msg=msg_start): model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers) else: model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers) from safetensors.torch import load_file as safe_load_file state_dict = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors")) if save_embedding_layers in ["auto", True]: self.assertTrue("base_model.model.embed_tokens.base_layer.weight" in state_dict) self.assertTrue( torch.allclose( model.base_model.model.embed_tokens.base_layer.weight, state_dict["base_model.model.embed_tokens.base_layer.weight"], ) ) else: self.assertFalse("base_model.model.embed_tokens.base_layer.weight" in state_dict) del state_dict @parameterized.expand(["auto", True, False]) def test_targeting_lora_to_embedding_layer_non_transformers(self, save_embedding_layers): model = ModelEmbConv1D() config = LoraConfig(target_modules=["emb", "lin0"], init_lora_weights=False) model = get_peft_model(model, config) with tempfile.TemporaryDirectory() as tmp_dirname: if save_embedding_layers is True: # assert warning msg_start = "Could not identify embedding layer(s) because the model is not a 🤗 transformers model." 
with self.assertWarns(UserWarning, msg=msg_start): model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers) else: model.save_pretrained(tmp_dirname, save_embedding_layers=save_embedding_layers) from safetensors.torch import load_file as safe_load_file state_dict = safe_load_file(os.path.join(tmp_dirname, "adapter_model.safetensors")) self.assertFalse("base_model.model.emb.base_layer.weight" in state_dict) del state_dict @parameterized.expand( [ LoraConfig(target_modules=["lin0"], init_lora_weights=False), LoKrConfig(target_modules=["lin0"], init_weights=False), LoHaConfig(target_modules=["lin0"], init_weights=False), AdaLoraConfig(target_modules=["lin0"], init_lora_weights=False), IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"], init_ia3_weights=False), OFTConfig(target_modules=["lin0"], init_weights=False), ] ) def test_adapter_name_makes_no_difference(self, config0): # It should not matter whether we use the default adapter name or a custom one model_cls = MLP input = torch.arange(90).reshape(9, 10).to(self.torch_device) # base model torch.manual_seed(0) base_model = model_cls().eval().to(self.torch_device) output_base = base_model(input) # default name torch.manual_seed(0) base_model = model_cls().eval().to(self.torch_device) torch.manual_seed(0) peft_model_default = get_peft_model(base_model, config0, adapter_name="default").eval().to(self.torch_device) output_default = peft_model_default(input) sd_default = peft_model_default.state_dict() # custom name 1 torch.manual_seed(0) base_model = model_cls().eval().to(self.torch_device) torch.manual_seed(0) peft_model_custom1 = get_peft_model(base_model, config0, adapter_name="adapter").eval().to(self.torch_device) output_custom1 = peft_model_custom1(input) sd_custom1 = peft_model_custom1.state_dict() # custom name 2 torch.manual_seed(0) base_model = model_cls().eval().to(self.torch_device) torch.manual_seed(0) peft_model_custom2 = ( get_peft_model(base_model, config0, adapter_name="other-name").eval().to(self.torch_device) ) output_custom2 = peft_model_custom2(input) sd_custom2 = peft_model_custom2.state_dict() assert len(sd_default) == len(sd_custom1) == len(sd_custom2) for key in sd_default: key1 = key.replace("default", "adapter") key2 = key.replace("default", "other-name") assert key1 in sd_custom1 assert key2 in sd_custom2 for k0, k1, k2 in zip(sd_default, sd_custom1, sd_custom2): assert torch.allclose(sd_default[k0], sd_custom1[k1]) assert torch.allclose(sd_default[k0], sd_custom2[k2]) self.assertFalse(torch.allclose(output_base, output_default)) self.assertFalse(torch.allclose(output_base, output_custom1)) self.assertFalse(torch.allclose(output_base, output_custom2)) self.assertTrue(torch.allclose(output_custom1, output_custom2)) self.assertTrue(torch.allclose(output_default, output_custom1)) class TestMultiRankAdapter(unittest.TestCase): """Tests related to multirank LoRA adapters""" def test_multirank(self): config_1 = LoraConfig( r=8, lora_alpha=8, init_lora_weights=False, target_modules=["lin0", "lin1"], ) config_2 = LoraConfig( r=8, lora_alpha=8, init_lora_weights=False, target_modules=["lin0", "lin1"], rank_pattern={"lin0": 4}, alpha_pattern={"lin0": 4}, ) # Add first adapter model = get_peft_model(MLP(), config_1, adapter_name="first") # Add second adapter model.add_adapter("second", config_2) # Extract current and expected ranks rank_current = model.lin0.lora_A["second"].weight.shape[0] rank_expected = config_2.rank_pattern["lin0"] self.assertTrue(rank_current == rank_expected, f"Rank 
{rank_current} is not equal to expected {rank_expected}") def test_multirank_2(self): rank_pattern = {} alpha_pattern = {} r = 4 lora_alpha = 8 for i in range(10): rank = 64 // (i + 1) for j in range(2): rank_pattern[f"layers.{i}.lin{j}"] = rank alpha_pattern[f"layers.{i}.lin{j}"] = 2 * rank config = LoraConfig( r=r, lora_alpha=lora_alpha, init_lora_weights=False, target_modules=["lin0", "lin1"], rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, ) # Add first adapter model = get_peft_model(DeepMLP(), config, adapter_name="first") # Add second adapter model.add_adapter("second", config) for adapter in ["first", "second"]: for key, module in model.base_model.model.named_modules(): if isinstance(module, BaseTunerLayer): rank_expected = rank_pattern.get(key, r) rank_current = module.lora_A[adapter].weight.shape[0] self.assertTrue( rank_current == rank_expected, f"Rank {rank_current} is not equal to expected {rank_expected}" ) class TestRepr(unittest.TestCase): """Tests related to the repr of adapted models""" def test_repr_lora_linear(self): config = LoraConfig(target_modules=["lin0"]) model = get_peft_model(MLP(), config) print_output = repr(model.model.lin0) self.assertTrue(print_output.startswith("lora.Linear")) self.assertTrue("in_features=10" in print_output) self.assertTrue("out_features=20" in print_output) self.assertTrue("lora_A" in print_output) self.assertTrue("lora_B" in print_output) self.assertTrue("default" in print_output) def test_repr_lora_embedding(self): config = LoraConfig(target_modules=["emb"]) model = get_peft_model(ModelEmbConv1D(), config) print_output = repr(model.model.emb) self.assertTrue(print_output.startswith("lora.Embedding")) self.assertTrue("100, 5" in print_output) self.assertTrue("lora_embedding_A" in print_output) self.assertTrue("lora_embedding_B" in print_output) self.assertTrue("default" in print_output) def test_repr_lora_conv1d(self): config = LoraConfig(target_modules=["conv1d"]) model = get_peft_model(ModelEmbConv1D(), config) print_output = repr(model.model.conv1d) self.assertTrue(print_output.startswith("lora.Linear")) self.assertTrue("in_features=5" in print_output) self.assertTrue("out_features=1" in print_output) self.assertTrue("lora_A" in print_output) self.assertTrue("lora_B" in print_output) self.assertTrue("default" in print_output) def test_repr_lora_conv2d(self): config = LoraConfig(target_modules=["conv2d"]) model = get_peft_model(ModelConv2D(), config) print_output = repr(model.model.conv2d) self.assertTrue(print_output.startswith("lora.Conv2d")) self.assertTrue("5, 10" in print_output) self.assertTrue("kernel_size=(3, 3)" in print_output) self.assertTrue("stride=(1, 1)" in print_output) self.assertTrue("lora_A" in print_output) self.assertTrue("lora_B" in print_output) self.assertTrue("default" in print_output) class MultipleActiveAdaptersTester(unittest.TestCase): """ A test class to test the functionality of multiple active adapters. This is not specifically tied to custom models, it's just easy to test here and testing it on all types of models would be overkill. 
""" def prepare_inputs_for_testing(self): X = torch.arange(90).view(9, 10) return {"X": X} def set_multiple_active_adapters(self, model, adapter_names): for module in model.modules(): if isinstance(module, BaseTunerLayer): module.set_adapter(adapter_names) @parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES) def test_multiple_active_adapters_forward( self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2 ): model = MLP(bias=tuner_method != "ia3") model.eval() X = self.prepare_inputs_for_testing() config_1 = config_cls(**config_kwargs_1) config_2 = config_cls(**config_kwargs_2) peft_model = get_peft_model(model, config_1, adapter_name="adapter_1") peft_model.add_adapter("adapter_2", config_2) # set adapter_1 peft_model.set_adapter("adapter_1") adapter_1_output = peft_model(**X) # set adapter_2 peft_model.set_adapter("adapter_2") adapter_2_output = peft_model(**X) # set ["adapter_1", "adapter_2"] self.set_multiple_active_adapters(peft_model, ["adapter_1", "adapter_2"]) combined_output = peft_model(**X) self.assertFalse(torch.allclose(adapter_1_output, adapter_2_output, atol=1e-5)) self.assertFalse(torch.allclose(adapter_1_output, combined_output, atol=1e-5)) self.assertFalse(torch.allclose(adapter_2_output, combined_output, atol=1e-5)) if tuner_method == "lora": # create a weighted adapter combining both adapters and check that # its output is same as setting multiple active adapters peft_model.add_weighted_adapter( ["adapter_1", "adapter_2"], [1.0, 1.0], "new_combined_adapter", combination_type="cat" ) peft_model.set_adapter("new_combined_adapter") new_combined_output = peft_model(**X) self.assertTrue(torch.allclose(new_combined_output, combined_output, atol=1e-5)) @parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES) def test_multiple_active_adapters_merge_and_unmerge( self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2 ): model = MLP(bias=tuner_method != "ia3") model.eval() X = self.prepare_inputs_for_testing() base_output = model(**X) config_1 = config_cls(**config_kwargs_1) config_2 = config_cls(**config_kwargs_2) peft_model = get_peft_model(model, config_1, adapter_name="adapter_1") peft_model.add_adapter("adapter_2", config_2) # set ["adapter_1", "adapter_2"] self.set_multiple_active_adapters(peft_model, ["adapter_1", "adapter_2"]) combined_output = peft_model(**X) peft_model.merge_adapter() merged_combined_output = peft_model(**X) self.assertTrue(torch.allclose(merged_combined_output, combined_output, atol=1e-5)) peft_model.unmerge_adapter() with peft_model.disable_adapter(): disabled_adapter_output = peft_model(**X) self.assertTrue(torch.allclose(disabled_adapter_output, base_output, atol=1e-4)) @parameterized.expand(MULTIPLE_ACTIVE_ADAPTERS_TEST_CASES) def test_merge_layers_multi(self, test_name, tuner_method, config_cls, config_kwargs_1, config_kwargs_2): model = MLP(bias=tuner_method != "ia3") model.eval() config_1 = config_cls(**config_kwargs_1) config_2 = config_cls(**config_kwargs_2) model = get_peft_model(model, config_1) dummy_input = self.prepare_inputs_for_testing() model.eval() with torch.inference_mode(): logits_adapter_1 = model(**dummy_input)[0] model.add_adapter("adapter-2", config_2) model.set_adapter("adapter-2") model.eval() with torch.inference_mode(): logits_adapter_2 = model(**dummy_input)[0] self.assertFalse(torch.allclose(logits_adapter_1, logits_adapter_2, atol=1e-3, rtol=1e-3)) model.set_adapter("default") with torch.inference_mode(): logits_adapter_1_after_set = model(**dummy_input)[0] 
self.assertTrue(torch.allclose(logits_adapter_1_after_set, logits_adapter_1, atol=1e-3, rtol=1e-3)) model_copy = copy.deepcopy(model) model_copy_2 = copy.deepcopy(model) model_merged_all = model.merge_and_unload(adapter_names=["adapter-2", "default"]) with torch.inference_mode(): logits_merged_all = model_merged_all(**dummy_input)[0] self.assertFalse(torch.allclose(logits_merged_all, logits_adapter_2, atol=1e-3, rtol=1e-3)) self.assertFalse(torch.allclose(logits_merged_all, logits_adapter_1, atol=1e-3, rtol=1e-3)) model_merged_adapter_2 = model_copy.merge_and_unload(adapter_names=["adapter-2"]) with torch.inference_mode(): logits_merged_adapter_2 = model_merged_adapter_2(**dummy_input)[0] self.assertTrue(torch.allclose(logits_merged_adapter_2, logits_adapter_2, atol=1e-3, rtol=1e-3)) model_merged_adapter_default = model_copy_2.merge_and_unload(adapter_names=["default"]) with torch.inference_mode(): logits_merged_adapter_default = model_merged_adapter_default(**dummy_input)[0] self.assertTrue(torch.allclose(logits_merged_adapter_default, logits_adapter_1, atol=1e-3, rtol=1e-3)) class RequiresGradTester(unittest.TestCase): """Test that requires_grad is set correctly in specific circumstances # See issue #899. This is not specifically tied to custom models, it's just easy to test here and testing it on all types of models would be overkill. """ def check_requires_grad(self, model, *params_expected: str): # Check that only the given parameters have requires_grad=True, and all others have requires_grad=False. # Calling without arguments besides the model means that all parameters should have requires_grad=False. params_with_requires_grad = [name for name, param in model.named_parameters() if param.requires_grad] diff = set(params_expected).symmetric_difference(set(params_with_requires_grad)) msg = f"Expected {params_expected} to require gradients, got {params_with_requires_grad}" self.assertEqual(len(diff), 0, msg=msg) def test_requires_grad_modules_to_save_default(self): config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) peft_model = get_peft_model(MLP(), config) self.check_requires_grad( peft_model, "base_model.model.lin1.modules_to_save.default.weight", "base_model.model.lin1.modules_to_save.default.bias", "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) def test_requires_grad_modules_to_save_disabling(self): config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) peft_model = get_peft_model(MLP(), config) # when disabling the adapter, the original module's grad should be enabled and vice versa peft_model.disable_adapter_layers() self.check_requires_grad( peft_model, "base_model.model.lin1.original_module.weight", "base_model.model.lin1.original_module.bias", ) # when re-enabling the adapter, the original module's grad should be disabled and vice versa peft_model.enable_adapter_layers() self.check_requires_grad( peft_model, "base_model.model.lin1.modules_to_save.default.weight", "base_model.model.lin1.modules_to_save.default.bias", "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # when using the disable_adapter context, the original module's grad should be enabled and vice versa with peft_model.disable_adapter(): self.check_requires_grad( peft_model, "base_model.model.lin1.original_module.weight", "base_model.model.lin1.original_module.bias", ) # after context is exited, return to the previous state self.check_requires_grad( peft_model, 
"base_model.model.lin1.modules_to_save.default.weight", "base_model.model.lin1.modules_to_save.default.bias", "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) def test_requires_grad_modules_to_save_multiple_adapters(self): config0 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) peft_model = get_peft_model(MLP(), config0) config1 = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin1.modules_to_save.default.weight", "base_model.model.lin1.modules_to_save.default.bias", "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin1.modules_to_save.default.weight", "base_model.model.lin1.modules_to_save.default.bias", "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # set config1 as active, should lead to adapter1 requiring grad peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.modules_to_save.adapter1.weight", "base_model.model.lin1.modules_to_save.adapter1.bias", "base_model.model.lin0.lora_A.adapter1.weight", "base_model.model.lin0.lora_B.adapter1.weight", ) def test_requires_grad_lora_different_targets(self): # test two different LoRA adapters that target different modules config0 = LoraConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = LoraConfig(target_modules=["lin1"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.lora_A.adapter1.weight", "base_model.model.lin1.lora_B.adapter1.weight", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin1.lora_A.adapter1.weight", "base_model.model.lin1.lora_B.adapter1.weight", ) def test_requires_grad_lora_same_targets(self): # same as previous test, except that LoRA adapters target the same layer config0 = LoraConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = LoraConfig(target_modules=["lin0"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default.weight", "base_model.model.lin0.lora_B.default.weight", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.adapter1.weight", "base_model.model.lin0.lora_B.adapter1.weight", ) # disable 
all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.adapter1.weight", "base_model.model.lin0.lora_B.adapter1.weight", ) def test_requires_grad_ia3_different_targets(self): # test two different IA3 adapters that target different modules config0 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = IA3Config(target_modules=["lin1"], feedforward_modules=["lin1"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.default", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.ia3_l.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin1.ia3_l.adapter1", ) def test_requires_grad_ia3_same_targets(self): # same as previous test, except that IA3 adapters target the same layer config0 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.default", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.adapter1", ) def test_requires_grad_adalora_different_targets(self): # test two different AdaLora adapters that target different modules config0 = AdaLoraConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = AdaLoraConfig(target_modules=["lin1"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default", "base_model.model.lin0.lora_B.default", "base_model.model.lin0.lora_E.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default", "base_model.model.lin0.lora_B.default", "base_model.model.lin0.lora_E.default", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.lora_A.adapter1", "base_model.model.lin1.lora_B.adapter1", "base_model.model.lin1.lora_E.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin1.lora_A.adapter1", 
"base_model.model.lin1.lora_B.adapter1", "base_model.model.lin1.lora_E.adapter1", ) def test_requires_grad_adalora_same_targets(self): # same as previous test, except that AdaLora adapters target the same layer config0 = AdaLoraConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = AdaLoraConfig(target_modules=["lin0"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default", "base_model.model.lin0.lora_B.default", "base_model.model.lin0.lora_E.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.default", "base_model.model.lin0.lora_B.default", "base_model.model.lin0.lora_E.default", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.adapter1", "base_model.model.lin0.lora_B.adapter1", "base_model.model.lin0.lora_E.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.adapter1", "base_model.model.lin0.lora_B.adapter1", "base_model.model.lin0.lora_E.adapter1", ) def test_requires_grad_lora_conv2d(self): # test two different LoRA adapters that target different modules config0 = LoraConfig(target_modules=["conv2d"]) peft_model = get_peft_model(ModelConv2D(), config0) config1 = LoraConfig(target_modules=["lin0"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.conv2d.lora_A.default.weight", "base_model.model.conv2d.lora_B.default.weight", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.conv2d.lora_A.default.weight", "base_model.model.conv2d.lora_B.default.weight", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.adapter1.weight", "base_model.model.lin0.lora_B.adapter1.weight", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin0.lora_A.adapter1.weight", "base_model.model.lin0.lora_B.adapter1.weight", ) def test_requires_grad_lora_emb_conv1d(self): # test two different LoRA adapters that target different modules config0 = LoraConfig(target_modules=["conv1d"]) peft_model = get_peft_model(ModelEmbConv1D(), config0) config1 = LoraConfig(target_modules=["emb"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.conv1d.lora_A.default.weight", "base_model.model.conv1d.lora_B.default.weight", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.conv1d.lora_A.default.weight", "base_model.model.conv1d.lora_B.default.weight", ) # change activate adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.emb.lora_embedding_A.adapter1", "base_model.model.emb.lora_embedding_B.adapter1", ) # 
disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.emb.lora_embedding_A.adapter1", "base_model.model.emb.lora_embedding_B.adapter1", ) def test_requires_grad_ia3_conv1d(self): # test two different IA3 adapters that target different modules config0 = IA3Config(target_modules=["conv1d"], feedforward_modules=[]) peft_model = get_peft_model(ModelEmbConv1D(), config0) config1 = IA3Config(target_modules=["lin0"], feedforward_modules=["lin0"]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.conv1d.ia3_l.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.conv1d.ia3_l.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.adapter1", ) def test_requires_grad_ia3_conv2d(self): # test two different IA3 adapters that target different modules config0 = IA3Config(target_modules=["conv2d"], feedforward_modules=["conv2d"]) peft_model = get_peft_model(ModelConv2D(), config0) config1 = IA3Config(target_modules=["lin0"], feedforward_modules=[]) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.conv2d.ia3_l.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.conv2d.ia3_l.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.ia3_l.adapter1", ) def test_requires_grad_loha_different_targets(self): # test two different LoHa adapters that target different modules config0 = LoHaConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = LoHaConfig(target_modules=["lin1"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.hada_w1_a.default", "base_model.model.lin0.hada_w1_b.default", "base_model.model.lin0.hada_w2_a.default", "base_model.model.lin0.hada_w2_b.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.hada_w1_a.default", "base_model.model.lin0.hada_w1_b.default", "base_model.model.lin0.hada_w2_a.default", "base_model.model.lin0.hada_w2_b.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.hada_w1_a.adapter1", "base_model.model.lin1.hada_w1_b.adapter1", "base_model.model.lin1.hada_w2_a.adapter1", "base_model.model.lin1.hada_w2_b.adapter1", ) # disable all adapters with
peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin1.hada_w1_a.adapter1", "base_model.model.lin1.hada_w1_b.adapter1", "base_model.model.lin1.hada_w2_a.adapter1", "base_model.model.lin1.hada_w2_b.adapter1", ) def test_requires_grad_loha_same_targets(self): # same as previous test, except that LoHa adapters target the same layer config0 = LoHaConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = LoHaConfig(target_modules=["lin0"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.hada_w1_a.default", "base_model.model.lin0.hada_w1_b.default", "base_model.model.lin0.hada_w2_a.default", "base_model.model.lin0.hada_w2_b.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.hada_w1_a.default", "base_model.model.lin0.hada_w1_b.default", "base_model.model.lin0.hada_w2_a.default", "base_model.model.lin0.hada_w2_b.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.hada_w1_a.adapter1", "base_model.model.lin0.hada_w1_b.adapter1", "base_model.model.lin0.hada_w2_a.adapter1", "base_model.model.lin0.hada_w2_b.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.hada_w1_a.adapter1", "base_model.model.lin0.hada_w1_b.adapter1", "base_model.model.lin0.hada_w2_a.adapter1", "base_model.model.lin0.hada_w2_b.adapter1", ) def test_requires_grad_lokr_different_targets(self): # test two different LoKr adapters that target different modules config0 = LoKrConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = LoKrConfig(target_modules=["lin1"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.lokr_w1.default", "base_model.model.lin0.lokr_w2.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.lokr_w1.default", "base_model.model.lin0.lokr_w2.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.lokr_w1.adapter1", "base_model.model.lin1.lokr_w2.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin1.lokr_w1.adapter1", "base_model.model.lin1.lokr_w2.adapter1", ) def test_requires_grad_lokr_same_targets(self): # same as previous test, except that LoKr adapters target the same layer config0 = LoKrConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = LoKrConfig(target_modules=["lin0"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.lokr_w1.default", "base_model.model.lin0.lokr_w2.default", ) # set config0 as active, should
not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.lokr_w1.default", "base_model.model.lin0.lokr_w2.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.lokr_w1.adapter1", "base_model.model.lin0.lokr_w2.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.lokr_w1.adapter1", "base_model.model.lin0.lokr_w2.adapter1", ) def test_requires_grad_oft_different_targets(self): # test two different OFT adapters that target different modules config0 = OFTConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = OFTConfig(target_modules=["lin1"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.oft_r.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.oft_r.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin1.oft_r.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state self.check_requires_grad( peft_model, "base_model.model.lin1.oft_r.adapter1", ) def test_requires_grad_oft_same_targets(self): # same as previous test, except that OFT adapters target the same layer config0 = OFTConfig(target_modules=["lin0"]) peft_model = get_peft_model(MLP(), config0) config1 = OFTConfig(target_modules=["lin0"], inference_mode=True) peft_model.add_adapter("adapter1", config1) # active adapter is still "default" self.check_requires_grad( peft_model, "base_model.model.lin0.oft_r.default", ) # set config0 as active, should not change anything peft_model.set_adapter("default") self.check_requires_grad( peft_model, "base_model.model.lin0.oft_r.default", ) # change active adapter to adapter1 peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.oft_r.adapter1", ) # disable all adapters with peft_model.disable_adapter(): self.check_requires_grad(peft_model) # after context is exited, return to the previous state peft_model.set_adapter("adapter1") self.check_requires_grad( peft_model, "base_model.model.lin0.oft_r.adapter1", )
0
hf_public_repos/peft/tests
hf_public_repos/peft/tests/regression/test_regression.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Regression testing: check that checkpoints from previous PEFT versions still return the same values. # # For normal regression testing, just run: # # `pytest tests/regression/test_regression.py -s --regression` # # Add `-s` to show potentially useful debugging information. `--regression` is a custom marker that is required for # regression tests not to be skipped. # # To create new regression tests, run: # `HF_TOKEN=<token> REGRESSION_CREATION_MODE=True pytest tests/regression/test_regression.py -s --regression` # # This will *fail* if: # # 1. the git worktree is dirty # 2. the git commit is not tagged # # Note: A Hugging Face Hub token is required to upload the regression artifacts to our # https://huggingface.co/peft-internal-testing repo. This can be done by anyone with write access to the repo but # apparently it is not possible to create a technical token with write access. # # This is important to ensure that the regression artifacts correspond to a specific released version of PEFT. # Therefore, it is recommended to checkout the tag before running the regression tests, e.g. by running: # # `git checkout v0.1.0` # # To override these checks, run: # ``HF_TOKEN=<token> REGRESSION_CREATION_MODE=True REGRESSION_FORCE_MODE=True pytest tests/regression/test_regression.py -s --regression` # # In REGRESSION_CREATION_MODE, one directory will be created in tests/regression/<TEST_NAME>/<PEFT_VERSION>/ for each # test. This will contain the saved adapter, as well as the output of the test of the model for that version. # # In normal testing mode, the saved adapter and output for each version found in the directory # tests/regression/<TEST_NAME>/ will be loaded and compared to the current output. # # When implementing new tests, check the existing ones as well as the description in the docstring of RegressionTester. import os import shutil import subprocess import sys import tempfile import unittest import pytest import torch from huggingface_hub import snapshot_download, upload_folder from torch import nn from transformers import AutoModelForCausalLM, BitsAndBytesConfig from transformers.pytorch_utils import Conv1D import peft from peft import AdaLoraConfig, IA3Config, LoHaConfig, LoKrConfig, LoraConfig, PeftModel, get_peft_model from peft.utils import infer_device PEFT_VERSION = peft.__version__ REGRESSION_DIR = tempfile.mkdtemp(prefix="peft_regression_") HF_TOKEN = os.environ.get("HF_TOKEN") # the repo has to be created manually once, it is not automatically created HF_REPO = "peft-internal-testing/regression-tests" @pytest.fixture(scope="session", autouse=True) def setup_tearndown(): # Use a pytest session-scoped fixture to setup and teardown exactly once per session. 
AFAICT, unittest does not # provide such a feature # download regression artifacts from Hugging Face Hub at the start snapshot_download( repo_id=HF_REPO, local_dir=REGRESSION_DIR, # Don't use symlink, because this prevents us from properly cleaning up the files once finished local_dir_use_symlinks=False, ) yield # delete regression artifacts at the end of the test session; optionally, upload them first if in creation mode creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False")) if creation_mode: # upload the regression directory to Hugging Face Hub, will overwrite by default upload_folder( repo_id=HF_REPO, folder_path=REGRESSION_DIR, token=HF_TOKEN, ) shutil.rmtree(REGRESSION_DIR) def strtobool(val): """Copied from distutils.util""" val = val.lower() if val in ("y", "yes", "t", "true", "on", "1"): return 1 elif val in ("n", "no", "f", "false", "off", "0"): return 0 else: raise ValueError("invalid truth value {!r}".format(val)) # same as in ..testing_utils.py but cannot be imported def require_torch_gpu(test_case): """ Decorator marking a test that requires a GPU. Will be skipped when no GPU is available. Copied from testing_utils.py. """ if not torch.cuda.is_available(): return unittest.skip("test requires GPU")(test_case) else: return test_case # same as in ..testing_utils.py but cannot be imported def require_bitsandbytes(test_case): """ Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not installed. Copied from testing_utils.py. """ try: import bitsandbytes # noqa: F401 except ImportError: return unittest.skip("test requires bitsandbytes")(test_case) else: return test_case def save_output(output, name, force=False): path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION) filename = os.path.join(path, "output.pt") if os.path.exists(filename) and not force: return if not os.path.exists(path): os.makedirs(path) if os.path.exists(filename) and force: print(f"Overriding existing output in {filename}", file=sys.stderr) torch.save(output, filename) def save_model(model, name, force=False): path = os.path.join(REGRESSION_DIR, name, PEFT_VERSION) filename = os.path.join(path, peft.utils.SAFETENSORS_WEIGHTS_NAME) if os.path.exists(filename) and not force: return if not os.path.exists(path): os.makedirs(path) if os.path.exists(filename) and force: print(f"Overriding existing model in {path}", file=sys.stderr) model.save_pretrained(path) def load_output(name): filename = os.path.join(REGRESSION_DIR, name, "output.pt") return torch.load(filename) @pytest.mark.regression class RegressionTester(unittest.TestCase): """Base class for regression testing Child classes must call assert_results_equal_or_store and pass the model output, as well as a unique name that describes the setting (e.g. "lora_opt-350m_bnb_4bit"). They also need to implement get_output(model) to get the model output, and load_base_model(name) to load the base model. Don't forget to fix the seed in load_base_model.
""" torch_device = infer_device() def setUp(self): self.tol = 1e-4 self.creation_mode = strtobool(os.environ.get("REGRESSION_CREATION_MODE", "False")) self.force_mode = strtobool(os.environ.get("REGRESSION_FORCE_MODE", "False")) if self.force_mode and not self.creation_mode: raise RuntimeError("REGRESSION_FORCE_MODE can only be used together with REGRESSION_CREATION_MODE") if self.creation_mode: self.check_clean_git_status(self.force_mode) if HF_TOKEN is None: raise RuntimeError("HF_TOKEN environment variable must be set in creation mode") def fix_seed(self): torch.manual_seed(0) def check_clean_git_status(self, force): """Ensure that worktree is not dirty and version tag is checked out""" # check that the worktree is clean try: subprocess.check_output(["git", "diff", "--quiet", "HEAD"]) except subprocess.CalledProcessError as exc: if force: print("Overriding despite dirty git worktree", file=sys.stderr) else: raise RuntimeError("Git worktree is dirty") from exc # check that the commit is tagged try: subprocess.check_output(["git", "describe", "--exact-match", "HEAD"]) except subprocess.CalledProcessError as exc: if force: print("Overriding despite non-tagged commit", file=sys.stderr) else: raise RuntimeError("Git commit is not tagged") from exc def assert_results_equal_or_store(self, model, name): """Check if the outputs are the same or save the outputs if in creation mode.""" if not self.creation_mode: # normal regression testing mode self._assert_results_equal(name) else: output = self.get_output(model) if not torch.isfinite(output).all(): raise RuntimeError(f"Model output for {name} is not finite") output2 = self.get_output(model) if not torch.allclose(output, output2): raise RuntimeError(f"Model output for {name} is not deterministic") save_output(output, name, force=self.force_mode) save_model(model, name, force=self.force_mode) def _assert_results_equal(self, name): path = os.path.join(REGRESSION_DIR, name) versions = os.listdir(path) for version in versions: # each directory corresponds to a version output_loaded = load_output(os.path.join(name, version)) base_model = self.load_base_model() model = PeftModel.from_pretrained(base_model, os.path.join(path, version)) output = self.get_output(model) self.assertTrue(torch.allclose(output_loaded, output, atol=self.tol, rtol=self.tol)) def get_output(self, model): raise NotImplementedError def load_base_model(self): raise NotImplementedError ############## # TEST CASES # ############## class TestMlp(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.lin0 = nn.Linear(10, 20, bias=bias) self.relu = nn.ReLU() self.lin1 = nn.Linear(20, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.lin1(X) X = self.sm(X) return X self.fix_seed() return MLP().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_mlp") def test_adalora(self): base_model = self.load_base_model() config = AdaLoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_mlp") def 
test_ia3(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["lin0"], feedforward_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_mlp") def test_ia3_no_ff(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["lin0"], feedforward_modules=[], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_no_ff_mlp") def test_loha(self): # TODO self.skipTest("Skipping LoHa for now because init is not seedable") base_model = self.load_base_model() config = LoHaConfig( r=8, init_weights=False, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "loha_mlp") def test_lokr(self): # TODO self.skipTest("Skipping LoKr for now because init is not seedable") base_model = self.load_base_model() config = LoKrConfig( r=8, target_modules=["lin0"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lokr_mlp") def test_lora_modules_to_save(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["lin0"], modules_to_save=["lin1"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_mlp_modules_to_save") class TestLoraEmbConv1D(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class ModelEmbConv1D(nn.Module): def __init__(self): super().__init__() self.emb = nn.Embedding(100, 5) self.conv1d = Conv1D(1, 5) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = self.emb(X) X = self.conv1d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X self.fix_seed() return ModelEmbConv1D().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["emb", "conv1d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_emb_conv1d") class TestLoraConv2D(RegressionTester): def get_output(self, model): input = torch.arange(90).reshape(9, 10).to(self.torch_device) with torch.inference_mode(): output = model(input) return output def load_base_model(self): class ModelConv2D(nn.Module): def __init__(self): super().__init__() self.conv2d = nn.Conv2d(5, 10, 3) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float().reshape(2, 5, 3, 3) X = self.conv2d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) X = self.sm(X) return X self.fix_seed() return ModelConv2D().to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_conv2d") def test_ia3(self): base_model = self.load_base_model() config = IA3Config( init_ia3_weights=False, target_modules=["conv2d"], feedforward_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_conv2d") def test_loha(self): # TODO self.skipTest("Skipping LoHa for now because init is not seedable") base_model = 
self.load_base_model() config = LoHaConfig( r=8, init_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "loha_conv2d") def test_lokr(self): # TODO self.skipTest("Skipping LoKr for now because init is not seedable") base_model = self.load_base_model() config = LoKrConfig( r=8, init_weights=False, target_modules=["conv2d"], ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lokr_conv2d") class TestOpt(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() return AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to(self.torch_device) def test_lora(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m") def test_adalora(self): base_model = self.load_base_model() config = AdaLoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m") def test_ia3(self): base_model = self.load_base_model() config = IA3Config(init_ia3_weights=False) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "ia3_opt-350m") @require_torch_gpu @require_bitsandbytes class TestOpt8bitBnb(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", load_in_8bit=True, ) return model def test_lora_8bit(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_8bit") def test_adalora(self): # TODO self.skipTest( "Skipping AdaLora for now, getting TypeError: unsupported operand type(s) for +=: 'dict' and 'Tensor'" ) base_model = self.load_base_model() config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m_8bit") @require_torch_gpu @require_bitsandbytes class TestOpt4bitBnb(RegressionTester): def get_output(self, model): input = torch.LongTensor([[1, 0, 1, 0, 1, 2]]).to(self.torch_device) with torch.inference_mode(): output = model(input).logits return output def load_base_model(self): self.fix_seed() bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, bnb_4bit_compute_type=torch.float32, ) model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", quantization_config=bnb_config, torch_dtype=torch.float32, ) return model def test_lora_4bit(self): base_model = self.load_base_model() config = LoraConfig( r=8, init_lora_weights=False, ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "lora_opt-350m_bnb_4bit") def test_adalora(self): # TODO self.skipTest("Skipping AdaLora for now because of a bug, see #1113") base_model = self.load_base_model() config = AdaLoraConfig( init_r=6, target_r=4, 
tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(base_model, config) self.assert_results_equal_or_store(model, "adalora_opt-350m_4bit")
0
hf_public_repos/peft
hf_public_repos/peft/docs/Makefile
# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SOURCEDIR = source BUILDDIR = _build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
0
hf_public_repos/peft
hf_public_repos/peft/docs/README.md
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Generating the documentation To generate the documentation, you first have to build it. Several packages are necessary to build the doc; you can install them with the following command at the root of the code repository: ```bash pip install -e ".[docs]" ``` Then you need to install our special tool that builds the documentation: ```bash pip install git+https://github.com/huggingface/doc-builder ``` --- **NOTE** You only need to generate the documentation to inspect it locally (if you're planning changes and want to check how they look before committing, for instance). You don't have to commit the built documentation. --- ## Building the documentation Once you have set up the `doc-builder` and additional packages, you can generate the documentation by typing the following command: ```bash doc-builder build peft docs/source/ --build_dir ~/tmp/test-build ``` You can adapt the `--build_dir` to set any temporary folder you prefer. This command will create it and generate the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite Markdown editor. ## Previewing the documentation To preview the docs, first install the `watchdog` module with: ```bash pip install watchdog ``` Then run the following command: ```bash doc-builder preview {package_name} {path_to_docs} ``` For example: ```bash doc-builder preview peft docs/source ``` The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. A bot will add a comment with a link to the documentation built from your changes. --- **NOTE** The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` & restart the `preview` command (`ctrl-c` to stop it & call `doc-builder preview ...` again). --- ## Adding a new element to the navigation bar Accepted files are Markdown (.md or .mdx). Create a file with its extension and put it in the source directory. You can then link it to the toc-tree by putting the filename without the extension in the [`_toctree.yml`](https://github.com/huggingface/peft/blob/main/docs/source/_toctree.yml) file. ## Renaming section headers and moving sections It helps to keep the old links working when renaming a section header and/or moving sections from one document to another. This is because the old links are likely to be used in issues, forums, and social media, and it makes for a much better user experience if users reading those months later can still easily navigate to the originally intended information. Therefore, we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
So if you renamed a section from: "Section A" to "Section B", then you can add at the end of the file: ``` Sections that were moved: [ <a href="#section-b">Section A</a><a id="section-a"></a> ] ``` and of course, if you moved it to another file, then: ``` Sections that were moved: [ <a href="../new-file#section-b">Section A</a><a id="section-a"></a> ] ``` Use the relative style to link to the new file so that the versioned docs continue to work. ## Writing Documentation - Specification The `huggingface/peft` documentation follows the [Google documentation](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) style for docstrings, although we can write them directly in Markdown. ### Adding a new tutorial Adding a new tutorial or section is done in two steps: - Add a new file under `./source`. This file should be in Markdown (.md or .mdx) format. - Link that file in `./source/_toctree.yml` on the correct toc-tree. Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so depending on the intended targets (beginners, more advanced users, or researchers) it should go into sections two, three, or four. ### Writing source documentation Values that should be put in `code` should be surrounded by backticks: \`like so\`. Note that argument names and objects like True, None, or any strings should usually be put in `code`. When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or function to be in the main package. If you want to create a link to some internal class or function, you need to provide its path. For instance: \[\`utils.gather\`\]. This will be converted into a link with `utils.gather` in the description. To get rid of the path and only keep the name of the object you are linking to in the description, add a ~: \[\`~utils.gather\`\] will generate a link with `gather` in the description. The same works for methods, so you can use either \[\`XXXClass.method\`\] or \[\`~XXXClass.method\`\]. #### Defining arguments in a method Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its description: ``` Args: n_layers (`int`): The number of layers of the model. ``` If the description is too long to fit in one line (more than 119 characters in total), another indentation is necessary before writing the description after the argument. Finally, to maintain uniformity, if any *one* description is too long to fit on one line, the rest of the parameters should follow suit and have an indentation before their description. Here's an example showcasing everything so far: ``` Args: gradient_accumulation_steps (`int`, *optional*, defaults to 1): The number of steps that should pass before gradients are accumulated. A number > 1 should be combined with `Accelerator.accumulate`. cpu (`bool`, *optional*): Whether or not to force the script to execute on CPU. Will ignore GPU available if set to `True` and force the execution on one process only.
```

For optional arguments or arguments with defaults, we follow the following syntax: imagine we have a function with the following signature:

```
def my_function(x: str = None, a: float = 1):
```

then its documentation should look like this:

```
    Args:
        x (`str`, *optional*):
            This argument controls ... and has a description longer than 119 chars.
        a (`float`, *optional*, defaults to 1):
            This argument is used to ... and has a description longer than 119 chars.
```

Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even if the first line describing your argument type and its default gets long, you can't break it into several lines. You can however write as many lines as you want in the indented description (see the example above with `gradient_accumulation_steps`).

#### Writing a multi-line code block

Multi-line code blocks can be useful for displaying examples. They are done between two lines of three backticks as usual in Markdown:

````
```python
# first line of code
# second line
# etc
```
````

#### Writing a return block

The return block should be introduced with the `Returns:` prefix, followed by a line return and an indentation. The first line should be the type of the return, followed by a line return. No need to indent further for the elements building the return.

Here's an example of a single value return:

```
    Returns:
        `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
```

Here's an example of a tuple return, comprising several objects:

```
    Returns:
        `tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
        - **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
          Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
        - **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
          Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
```

## Styling the docstring

We have an automatic script running with the `make style` command that will make sure that:
- the docstrings fully take advantage of the line width
- all code examples are formatted using black, like the code of the Transformers library

This script may have some weird failures if you make a syntax mistake or if you uncover a bug. Therefore, it's recommended to commit your changes before running `make style`, so you can revert the changes done by that script easily.

## Writing documentation examples

The syntax for example docstrings can look as follows:

````
    Example:

    ```python
    >>> import time
    >>> from accelerate import Accelerator
    >>> accelerator = Accelerator()
    >>> if accelerator.is_main_process:
    ...     time.sleep(2)
    ... else:
    ...     print("I'm waiting for the main process to finish its sleep...")
    >>> accelerator.wait_for_everyone()
    >>> # Should print on every process at the same time
    >>> print("Everyone is here")
    ```
````

The docstring should give a minimal, clear example of how the respective function is to be used in inference and also include the expected (ideally sensible) output. Often, readers will try out the example before even going through the function or class definitions. Therefore, it is of utmost importance that the example works as expected.
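Putting the `Args:`, `Returns:`, and `Example:` conventions together, a full docstring for a hypothetical `generate_text` function might look like the sketch below (the function name, arguments, and output are made up purely for illustration):

````
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
        temperature (`float`, *optional*, defaults to 1.0):
            The value used to modulate the next token probabilities.

    Returns:
        `List[str]`: The generated sequences, decoded to strings.

    Example:

    ```python
    >>> outputs = generate_text(input_ids, temperature=0.7)
    >>> print(outputs[0])
    "Preheat the oven to 350 degrees..."
    ```
````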
hf_public_repos/peft/docs/source/quicktour.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Quicktour

PEFT offers parameter-efficient methods for finetuning large pretrained models. The traditional paradigm is to finetune all of a model's parameters for each downstream task, but this is becoming exceedingly costly and impractical because of the enormous number of parameters in models today. Instead, it is more efficient to train a smaller number of prompt parameters or use a reparametrization method like low-rank adaptation (LoRA) to reduce the number of trainable parameters.

This quicktour will show you PEFT's main features and how you can train or run inference on large models that would typically be inaccessible on consumer devices.

## Train

Each PEFT method is defined by a [`PeftConfig`] class that stores all the important parameters for building a [`PeftModel`]. For example, to train with LoRA, load and create a [`LoraConfig`] class and specify the following parameters:

- `task_type`: the task to train for (sequence-to-sequence language modeling in this case)
- `inference_mode`: whether you're using the model for inference or not
- `r`: the dimension of the low-rank matrices
- `lora_alpha`: the scaling factor for the low-rank matrices
- `lora_dropout`: the dropout probability of the LoRA layers

```python
from peft import LoraConfig, TaskType

peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
```

<Tip>

See the [`LoraConfig`] reference for more details about other parameters you can adjust, such as the modules to target or the bias type.

</Tip>

Once the [`LoraConfig`] is set up, create a [`PeftModel`] with the [`get_peft_model`] function. It takes a base model - which you can load from the Transformers library - and the [`LoraConfig`] containing the parameters for how to configure a model for training with LoRA.

Load the base model you want to finetune.

```python
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
```

Wrap the base model and `peft_config` with the [`get_peft_model`] function to create a [`PeftModel`]. To get a sense of the number of trainable parameters in your model, use the [`print_trainable_parameters`] method.

```python
from peft import get_peft_model

model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
"output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282"
```

Out of [bigscience/mt0-large's](https://huggingface.co/bigscience/mt0-large) 1.2B parameters, you're only training 0.19% of them!

That is it 🎉! Now you can train the model with the Transformers [`~transformers.Trainer`], Accelerate, or any custom PyTorch training loop.
For example, to train with the [`~transformers.Trainer`] class, set up a [`~transformers.TrainingArguments`] class with some training hyperparameters.

```py
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="your-name/bigscience/mt0-large-lora",
    learning_rate=1e-3,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=32,
    num_train_epochs=2,
    weight_decay=0.01,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
)
```

Pass the model, training arguments, dataset, tokenizer, and any other necessary components to the [`~transformers.Trainer`], and call [`~transformers.Trainer.train`] to start training.

```py
from transformers import Trainer

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

trainer.train()
```

### Save model

After your model is finished training, you can save your model to a directory using the [`~transformers.PreTrainedModel.save_pretrained`] function.

```py
model.save_pretrained("output_dir")
```

You can also save your model to the Hub (make sure you're logged in to your Hugging Face account first) with the [`~transformers.PreTrainedModel.push_to_hub`] function.

```python
from huggingface_hub import notebook_login

notebook_login()
model.push_to_hub("your-name/bigscience/mt0-large-lora")
```

Both methods only save the extra PEFT weights that were trained, which makes it very efficient to store, transfer, and load them. For example, this [facebook/opt-350m](https://huggingface.co/ybelkada/opt-350m-lora) model trained with LoRA only contains two files: `adapter_config.json` and `adapter_model.safetensors`. The `adapter_model.safetensors` file is just 6.3MB!

<div class="flex flex-col justify-center">
  <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/>
  <figcaption class="text-center">The adapter weights for an opt-350m model stored on the Hub are only ~6MB compared to the full size of the model weights, which can be ~700MB.</figcaption>
</div>

## Inference

<Tip>

Take a look at the [AutoPeftModel](package_reference/auto_class) API reference for a complete list of available `AutoPeftModel` classes.

</Tip>

Easily load any PEFT-trained model for inference with the [`AutoPeftModel`] class and the [`~transformers.PreTrainedModel.from_pretrained`] method:

```py
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer
import torch

model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

model = model.to("cuda")
model.eval()
inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt")

outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50)
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])

"Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla."
```

For other tasks that aren't explicitly supported with an `AutoPeftModelFor` class - such as automatic speech recognition - you can still use the base [`AutoPeftModel`] class to load a model for the task.
```py
from peft import AutoPeftModel

model = AutoPeftModel.from_pretrained("smangrul/openai-whisper-large-v2-LORA-colab")
```

## Next steps

Now that you've seen how to train a model with one of the PEFT methods, we encourage you to try out some of the other methods like prompt tuning. The steps are very similar to the ones shown in the quicktour:

1. prepare a [`PeftConfig`] for a PEFT method
2. use the [`get_peft_model`] method to create a [`PeftModel`] from the configuration and base model

Then you can train it however you like! To load a PEFT model for inference, you can use the [`AutoPeftModel`] class.

Feel free to also take a look at the task guides if you're interested in training a model with another PEFT method for a specific task such as semantic segmentation, multilingual automatic speech recognition, DreamBooth, token classification, and more.
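As a quick illustration of those two steps, here is a minimal prompt tuning sketch (the base model id and the `num_virtual_tokens` value are illustrative choices, not requirements):

```python
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, TaskType, get_peft_model

# 1. Prepare a PeftConfig for the method - here, prompt tuning with 10 trainable virtual tokens.
peft_config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=10)

# 2. Create a PeftModel from the configuration and base model.
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```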
hf_public_repos/peft/docs/source/install.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Installation

Before you start, you will need to set up your environment, install the appropriate packages, and configure 🤗 PEFT. 🤗 PEFT is tested on **Python 3.8+**.

🤗 PEFT is available on PyPI, as well as GitHub:

## PyPI

To install 🤗 PEFT from PyPI:

```bash
pip install peft
```

## Source

New features that haven't been released yet are added every day, which also means there may be some bugs. To try them out, install from the GitHub repository:

```bash
pip install git+https://github.com/huggingface/peft
```

If you're working on contributing to the library or wish to play with the source code and see live results as you run the code, an editable version can be installed from a locally-cloned version of the repository:

```bash
git clone https://github.com/huggingface/peft
cd peft
pip install -e .
```
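As a quick sanity check (not part of the original guide), you can verify the install by printing the installed version from Python:

```bash
python -c "import peft; print(peft.__version__)"
```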
hf_public_repos/peft/docs/source/_toctree.yml
- title: Get started
  sections:
  - local: index
    title: 🤗 PEFT
  - local: quicktour
    title: Quicktour
  - local: install
    title: Installation
- title: Tutorial
  sections:
  - local: tutorial/peft_model_config
    title: PEFT configurations and models
  - local: tutorial/peft_integrations
    title: PEFT integrations
- title: Task guides
  sections:
  - local: task_guides/seq2seq-prefix-tuning
    title: Prefix tuning for conditional generation
  - local: task_guides/clm-prompt-tuning
    title: Prompt tuning for causal language modeling
  - local: task_guides/ptuning-seq-classification
    title: P-tuning for sequence classification
  - title: LoRA
    sections:
    - local: task_guides/image_classification_lora
      title: Image classification
    - local: task_guides/semantic_segmentation_lora
      title: Semantic segmentation
    - local: task_guides/token-classification-lora
      title: Token classification
    - local: task_guides/semantic-similarity-lora
      title: Semantic similarity
    - local: task_guides/int8-asr
      title: int8 training for automatic speech recognition
    - local: task_guides/dreambooth_lora
      title: DreamBooth
- title: Developer guides
  sections:
  - local: developer_guides/custom_models
    title: Working with custom models
  - local: developer_guides/low_level_api
    title: PEFT low level API
  - local: developer_guides/mixed_models
    title: Mixing different adapter types
  - local: developer_guides/contributing
    title: Contributing to PEFT
  - local: developer_guides/troubleshooting
    title: Troubleshooting
- title: 🤗 Accelerate integrations
  sections:
  - local: accelerate/deepspeed-zero3-offload
    title: DeepSpeed
  - local: accelerate/fsdp
    title: Fully Sharded Data Parallel
- title: Conceptual guides
  sections:
  - local: conceptual_guides/lora
    title: LoRA
  - local: conceptual_guides/prompting
    title: Prompting
  - local: conceptual_guides/ia3
    title: IA3
- sections:
  - sections:
    - local: package_reference/auto_class
      title: AutoPeftModel
    - local: package_reference/peft_model
      title: PEFT model
    - local: package_reference/peft_types
      title: PEFT types
    - local: package_reference/config
      title: Configuration
    - local: package_reference/tuners
      title: Tuner
    title: Main classes
  - sections:
    - local: package_reference/adalora
      title: AdaLoRA
    - local: package_reference/ia3
      title: IA3
    - local: package_reference/llama_adapter
      title: Llama-Adapter
    - local: package_reference/loha
      title: LoHa
    - local: package_reference/lokr
      title: LoKr
    - local: package_reference/lora
      title: LoRA
    - local: package_reference/adapter_utils
      title: LyCORIS
    - local: package_reference/multitask_prompt_tuning
      title: Multitask Prompt Tuning
    - local: package_reference/oft
      title: OFT
    - local: package_reference/p_tuning
      title: P-tuning
    - local: package_reference/prefix_tuning
      title: Prefix tuning
    - local: package_reference/prompt_tuning
      title: Prompt tuning
    title: Adapters
  title: API reference
hf_public_repos/peft/docs/source/index.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# PEFT

🤗 PEFT (Parameter-Efficient Fine-Tuning) is a library for efficiently adapting large pretrained models to various downstream applications without fine-tuning all of a model's parameters, because doing so is prohibitively costly. PEFT methods only fine-tune a small number of (extra) model parameters - significantly decreasing computational and storage costs - while yielding performance comparable to a fully fine-tuned model. This makes it more accessible to train and store large language models (LLMs) on consumer hardware.

PEFT is integrated with the Transformers, Diffusers, and Accelerate libraries to provide a faster and easier way to load, train, and use large models for inference.

<div class="mt-10">
  <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="quicktour"
      ><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Get started</div>
      <p class="text-gray-700">Start here if you're new to 🤗 PEFT to get an overview of the library's main features, and how to train a model with a PEFT method.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./task_guides/image_classification_lora"
      ><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
      <p class="text-gray-700">Practical guides demonstrating how to apply various PEFT methods across different types of tasks like image classification, causal language modeling, automatic speech recognition, and more.
Learn how to use 🤗 PEFT with the DeepSpeed and Fully Sharded Data Parallel scripts.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/lora"
      ><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
      <p class="text-gray-700">Get a better theoretical understanding of how LoRA and various soft prompting methods help reduce the number of trainable parameters to make training more efficient.</p>
    </a>
    <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/config"
      ><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
      <p class="text-gray-700">Technical descriptions of how 🤗 PEFT classes and methods work.</p>
    </a>
  </div>
</div>

<iframe
  src="https://stevhliu-peft-methods.hf.space"
  frameborder="0"
  width="850"
  height="620"
></iframe>
hf_public_repos/peft/docs/source/_config.py
# docstyle-ignore
INSTALL_CONTENT = """
# PEFT installation
! pip install peft accelerate transformers
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/peft.git
"""
hf_public_repos/peft/docs/source/conceptual_guides/ia3.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# IA3

This conceptual guide gives a brief overview of [IA3](https://arxiv.org/abs/2205.05638), a parameter-efficient fine-tuning technique that is intended to improve over [LoRA](./lora).

To make fine-tuning more efficient, IA3 (Infused Adapter by Inhibiting and Amplifying Inner Activations) rescales inner activations with learned vectors. These learned vectors are injected into the attention and feedforward modules in a typical transformer-based architecture. The learned vectors are the only trainable parameters during fine-tuning, so the original weights remain frozen. Dealing with learned vectors (as opposed to learned low-rank updates to a weight matrix like LoRA) keeps the number of trainable parameters much smaller.

Since it is similar to LoRA, IA3 carries many of the same advantages:

* IA3 makes fine-tuning more efficient by drastically reducing the number of trainable parameters. (For T0, an IA3 model only has about 0.01% trainable parameters, while even LoRA has > 0.1%.)
* The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable IA3 models for various downstream tasks built on top of them.
* Performance of models fine-tuned using IA3 is comparable to the performance of fully fine-tuned models.
* IA3 does not add any inference latency because adapter weights can be merged with the base model.

In principle, IA3 can be applied to any subset of weight matrices in a neural network to reduce the number of trainable parameters. Following the authors' implementation, IA3 weights are added to the key, value, and feedforward layers of a Transformer model. To be specific, for transformer models, IA3 weights are added to the outputs of the key and value layers, and to the input of the second feedforward layer in each transformer block.

Given the target layers for injecting IA3 parameters, the number of trainable parameters can be determined based on the size of the weight matrices.

## Common IA3 parameters in PEFT

As with other methods supported by PEFT, to fine-tune a model using IA3, you need to:

1. Instantiate a base model.
2. Create a configuration (`IA3Config`) where you define IA3-specific parameters.
3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`.
4. Train the `PeftModel` as you normally would train the base model.

`IA3Config` allows you to control how IA3 is applied to the base model through the following parameters:

- `target_modules`: The modules (for example, attention blocks) to apply the IA3 vectors to.
- `feedforward_modules`: The list of modules to be treated as feedforward layers in `target_modules`. While learned vectors are multiplied with the output activation for attention blocks, the vectors are multiplied with the input for classic feedforward layers.
  Note that `feedforward_modules` must be a subset of `target_modules`.
- `modules_to_save`: List of modules apart from IA3 layers to be set as trainable and saved in the final checkpoint. These typically include the model's custom head that is randomly initialized for the fine-tuning task.

## Example Usage

For the task of sequence classification, one can initialize the IA3 config for a Llama model as follows:

```py
from peft import IA3Config, TaskType

peft_config = IA3Config(
    task_type=TaskType.SEQ_CLS, target_modules=["k_proj", "v_proj", "down_proj"], feedforward_modules=["down_proj"]
)
```
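To complete the picture, here is a minimal sketch of the remaining common steps listed above - wrapping a base model with this config (the checkpoint id and `num_labels` value are illustrative):

```python
from transformers import AutoModelForSequenceClassification
from peft import IA3Config, TaskType, get_peft_model

peft_config = IA3Config(
    task_type=TaskType.SEQ_CLS,
    target_modules=["k_proj", "v_proj", "down_proj"],
    feedforward_modules=["down_proj"],  # must be a subset of target_modules
)

# Any Llama-style classification model works here; the checkpoint id is illustrative.
base_model = AutoModelForSequenceClassification.from_pretrained("meta-llama/Llama-2-7b-hf", num_labels=2)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```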
hf_public_repos/peft/docs/source/conceptual_guides/lora.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# LoRA

This conceptual guide gives a brief overview of [LoRA](https://arxiv.org/abs/2106.09685), a technique that accelerates the fine-tuning of large models while consuming less memory.

To make fine-tuning more efficient, LoRA represents the weight updates with two smaller matrices (called **update matrices**) obtained through low-rank decomposition. These new matrices can be trained to adapt to the new data while keeping the overall number of changes low. The original weight matrix remains frozen and doesn't receive any further adjustments. To produce the final results, both the original and the adapted weights are combined.

This approach has a number of advantages:

* LoRA makes fine-tuning more efficient by drastically reducing the number of trainable parameters.
* The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable LoRA models for various downstream tasks built on top of them.
* LoRA is orthogonal to many other parameter-efficient methods and can be combined with many of them.
* Performance of models fine-tuned using LoRA is comparable to the performance of fully fine-tuned models.
* LoRA does not add any inference latency because adapter weights can be merged with the base model.

In principle, LoRA can be applied to any subset of weight matrices in a neural network to reduce the number of trainable parameters. However, for simplicity and further parameter efficiency, in Transformer models LoRA is typically applied to attention blocks only. The resulting number of trainable parameters in a LoRA model depends on the size of the low-rank update matrices, which is determined mainly by the rank `r` and the shape of the original weight matrix.

## Merge LoRA weights into the base model

While LoRA is significantly smaller and faster to train, you may encounter latency issues during inference due to separately loading the base model and the LoRA adapter. To eliminate this latency, use the [`~LoraModel.merge_and_unload`] function to merge the adapter weights into the base model, which allows you to use the newly merged model as a standalone model.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_diagram.png"/>
</div>

This works because during training, the smaller weight matrices (*A* and *B* in the diagram above) are separate. But once training is complete, the weights can be merged into a single new weight matrix that produces identical outputs.

## Utils for LoRA

Use [`~LoraModel.merge_adapter`] to merge the LoRA layers into the base model while retaining the PeftModel. This will help in later unmerging, deleting, loading different adapters, and so on.
Use [`~LoraModel.unmerge_adapter`] to unmerge the LoRA layers from the base model while retaining the PeftModel. This will help in later merging, deleting, loading different adapters, and so on.

Use [`~LoraModel.unload`] to get back the base model without merging the active LoRA modules. This helps when you want to reset the model to its original state in some applications. For example, in the Stable Diffusion WebUI, when the user wants to infer with the base model after trying out LoRAs.

Use [`~LoraModel.delete_adapter`] to delete an existing adapter.

Use [`~LoraModel.add_weighted_adapter`] to combine multiple LoRAs into a new adapter based on the user-provided weighting scheme.

## Common LoRA parameters in PEFT

As with other methods supported by PEFT, to fine-tune a model using LoRA, you need to:

1. Instantiate a base model.
2. Create a configuration (`LoraConfig`) where you define LoRA-specific parameters.
3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`.
4. Train the `PeftModel` as you normally would train the base model.

`LoraConfig` allows you to control how LoRA is applied to the base model through the following parameters:

- `r`: the rank of the update matrices, expressed as an `int`. Lower rank results in smaller update matrices with fewer trainable parameters.
- `target_modules`: The modules (for example, attention blocks) to apply the LoRA update matrices to.
- `lora_alpha`: LoRA scaling factor.
- `bias`: Specifies if the `bias` parameters should be trained. Can be `'none'`, `'all'` or `'lora_only'`.
- `modules_to_save`: List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. These typically include the model's custom head that is randomly initialized for the fine-tuning task.
- `layers_to_transform`: List of layers to be transformed by LoRA. If not specified, all layers in `target_modules` are transformed.
- `layers_pattern`: Pattern to match layer names in `target_modules`, if `layers_to_transform` is specified. By default `PeftModel` will look at common layer patterns (`layers`, `h`, `blocks`, etc.); use this for exotic and custom models.
- `rank_pattern`: The mapping from layer names or regular expressions to ranks which are different from the default rank specified by `r`.
- `alpha_pattern`: The mapping from layer names or regular expressions to alphas which are different from the default alpha specified by `lora_alpha`.

## LoRA examples

For examples of the LoRA method applied to various downstream tasks, please refer to the following guides:

* [Image classification using LoRA](../task_guides/image_classification_lora)
* [Semantic segmentation](../task_guides/semantic_segmentation_lora)

While the original paper focuses on language models, the technique can be applied to any dense layers in deep learning models. As such, you can leverage this technique with diffusion models. See the [Dreambooth fine-tuning with LoRA](../task_guides/dreambooth_lora) task guide for an example.

## Initialization options

The initialization of LoRA weights is controlled by the parameter `init_lora_weights` of the `LoraConfig`. By default, PEFT initializes LoRA weights the same way as the [reference implementation](https://github.com/microsoft/LoRA), i.e. using Kaiming-uniform for weight A and initializing weight B as zeros, resulting in an identity transform.

It is also possible to pass `init_lora_weights="gaussian"`.
As the name suggests, this results in initializing weight A with a Gaussian distribution (weight B is still zeros). This corresponds to the way that [diffusers](https://huggingface.co/docs/diffusers/index) initializes LoRA weights.

When quantizing the base model, e.g. for QLoRA training, consider using the [LoftQ initialization](https://arxiv.org/abs/2310.08659), which has been shown to improve performance with quantization. The idea is that the LoRA weights are initialized such that the quantization error is minimized. To use this option, *do not* quantize the base model. Instead, proceed as follows:

```python
from transformers import AutoModelForCausalLM
from peft import LoftQConfig, LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained(...)  # don't quantize here
loftq_config = LoftQConfig(loftq_bits=4, ...)  # set 4bit quantization
lora_config = LoraConfig(..., init_lora_weights="loftq", loftq_config=loftq_config)
peft_model = get_peft_model(base_model, lora_config)
```

Finally, there is also an option to set `init_lora_weights=False`. When choosing this option, the LoRA weights are initialized such that they do *not* result in an identity transform. This is useful for debugging and testing purposes and should not be used otherwise.
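As a quick side-by-side illustration of these initialization options, here is a minimal sketch (the `target_modules` values are illustrative):

```python
from peft import LoraConfig

# Default: Kaiming-uniform for weight A, zeros for weight B (identity transform).
default_config = LoraConfig(target_modules=["q_proj", "v_proj"])

# Gaussian init for weight A (weight B is still zeros), as in diffusers.
gaussian_config = LoraConfig(init_lora_weights="gaussian", target_modules=["q_proj", "v_proj"])

# No identity-preserving init - for debugging and testing only.
debug_config = LoraConfig(init_lora_weights=False, target_modules=["q_proj", "v_proj"])
```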
hf_public_repos/peft/docs/source/conceptual_guides/prompting.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Prompting

Training large pretrained language models is very time-consuming and compute-intensive. As they continue to grow in size, there is increasing interest in more efficient training methods such as *prompting*. Prompting primes a frozen pretrained model for a specific downstream task by including a text prompt that describes the task or even demonstrates an example of the task. With prompting, you can avoid fully training a separate model for each downstream task and use the same frozen pretrained model instead. This is a lot easier because you can use the same model for several different tasks, and it is significantly more efficient to train and store a smaller set of prompt parameters than to train all the model's parameters.

There are two categories of prompting methods:

- hard prompts are manually handcrafted text prompts with discrete input tokens; the downside is that it requires a lot of effort to create a good prompt
- soft prompts are learnable tensors concatenated with the input embeddings that can be optimized to a dataset; the downside is that they aren't human readable because you aren't matching these "virtual tokens" to the embeddings of a real word

This conceptual guide provides a brief overview of the soft prompt methods included in 🤗 PEFT: prompt tuning, prefix tuning, and P-tuning.

## Prompt tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/prompt-tuning.png"/>
</div>
<small>Only train and store a significantly smaller set of task-specific prompt parameters <a href="https://arxiv.org/abs/2104.08691">(image source)</a>.</small>

Prompt tuning was developed for text classification tasks on T5 models, and all downstream tasks are cast as a text generation task. For example, sequence classification usually assigns a single class label to a sequence of text. By casting it as a text generation task, the tokens that make up the class label are *generated*. Prompts are added to the input as a series of tokens. Typically, the model parameters are fixed, which means the prompt tokens are also fixed by the model parameters.

The key idea behind prompt tuning is that prompt tokens have their own parameters that are updated independently. This means you can keep the pretrained model's parameters frozen, and only update the gradients of the prompt token embeddings. The results are comparable to the traditional method of training the entire model, and prompt tuning performance scales as model size increases.

Take a look at [Prompt tuning for causal language modeling](../task_guides/clm-prompt-tuning) for a step-by-step guide on how to train a model with prompt tuning.

## Prefix tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/prefix-tuning.png"/>
</div>
<small>Optimize the prefix parameters for each task <a href="https://arxiv.org/abs/2101.00190">(image source)</a>.</small>

Prefix tuning was designed for natural language generation (NLG) tasks on GPT models. It is very similar to prompt tuning; prefix tuning also prepends a sequence of task-specific vectors to the input that can be trained and updated while keeping the rest of the pretrained model's parameters frozen.
The main difference is that the prefix parameters are inserted in **all** of the model layers, whereas prompt tuning only adds the prompt parameters to the model input embeddings. The prefix parameters are also optimized by a separate feed-forward network (FFN) instead of training directly on the soft prompts, because training them directly causes instability and hurts performance. The FFN is discarded after updating the soft prompts.

As a result, the authors found that prefix tuning demonstrates comparable performance to fully finetuning a model, despite having 1000x fewer parameters, and it performs even better in low-data settings.

Take a look at [Prefix tuning for conditional generation](../task_guides/seq2seq-prefix-tuning) for a step-by-step guide on how to train a model with prefix tuning.

## P-tuning

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/p-tuning.png"/>
</div>
<small>Prompt tokens can be inserted anywhere in the input sequence, and they are optimized by a prompt encoder <a href="https://arxiv.org/abs/2103.10385">(image source)</a>.</small>

P-tuning is designed for natural language understanding (NLU) tasks and all language models. It is another variation of a soft prompt method; P-tuning also adds a trainable embedding tensor that can be optimized to find better prompts, and it uses a prompt encoder (a bidirectional long short-term memory network, or LSTM) to optimize the prompt parameters. Unlike prefix tuning though:

- the prompt tokens can be inserted anywhere in the input sequence, not just at the beginning
- the prompt tokens are only added to the input instead of being added to every layer of the model
- introducing *anchor* tokens can improve performance because they indicate characteristics of a component in the input sequence

The results suggest that P-tuning is more efficient than manually crafting prompts, and it enables GPT-like models to compete with BERT-like models on NLU tasks.

Take a look at [P-tuning for sequence classification](../task_guides/ptuning-seq-classification) for a step-by-step guide on how to train a model with P-tuning.
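To make the soft prompt methods concrete, here is a minimal sketch of configuring P-tuning in PEFT via [`PromptEncoderConfig`] (the base model id and hyperparameter values are illustrative):

```python
from transformers import AutoModelForSequenceClassification
from peft import PromptEncoderConfig, TaskType, get_peft_model

# The prompt encoder reparameterizes the virtual token embeddings;
# "LSTM" is chosen here to match the description in this guide.
peft_config = PromptEncoderConfig(
    task_type=TaskType.SEQ_CLS,
    num_virtual_tokens=20,
    encoder_hidden_size=128,
    encoder_reparameterization_type="LSTM",
)

model = AutoModelForSequenceClassification.from_pretrained("roberta-large")
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```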
hf_public_repos/peft/docs/source/accelerate/deepspeed-zero3-offload.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# DeepSpeed

[DeepSpeed](https://www.deepspeed.ai/) is a library designed for the speed and scale of distributed training of large models with billions of parameters. At its core is the Zero Redundancy Optimizer (ZeRO), which shards optimizer states (ZeRO-1), gradients (ZeRO-2), and parameters (ZeRO-3) across data parallel processes. This drastically reduces memory usage, allowing you to scale your training to billion-parameter models. To unlock even more memory efficiency, ZeRO-Offload reduces GPU compute and memory by leveraging CPU resources during optimization.

Both of these features are supported in 🤗 Accelerate, and you can use them with 🤗 PEFT. This guide will help you learn how to use our DeepSpeed [training script](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You'll configure the script to train a large model for conditional generation with ZeRO-3 and ZeRO-Offload.

<Tip>

💡 To help you get started, check out our example training scripts for [causal language modeling](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py) and [conditional generation](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py). You can adapt these scripts for your own applications or even use them out of the box if your task is similar to the one in the scripts.

</Tip>

## Configuration

Start by running the following command to [create a DeepSpeed configuration file](https://huggingface.co/docs/accelerate/quicktour#launching-your-distributed-script) with 🤗 Accelerate. The `--config_file` flag allows you to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache.

The configuration file is used to set the default options when you launch the training script.

```bash
accelerate config --config_file ds_zero3_cpu.yaml
```

You'll be asked a few questions about your setup, and to configure the following arguments. In this example, you'll use ZeRO-3 and ZeRO-Offload, so make sure you pick those options.

```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
```

An example [configuration file](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml) might look like the following. The most important thing to notice is that `zero_stage` is set to `3`, and `offload_optimizer_device` and `offload_param_device` are set to `cpu`.

```yml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
use_cpu: false
```

## The important parts

Let's dive a little deeper into the script so you can see what's going on, and understand how it works.

Within the [`main`](https://github.com/huggingface/peft/blob/2822398fbe896f25d4dac5e468624dc5fd65a51b/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py#L103) function, the script creates an [`~accelerate.Accelerator`] class to initialize all the necessary requirements for distributed training.

<Tip>

💡 Feel free to change the model and dataset inside the `main` function. If your dataset format is different from the one in the script, you may also need to write your own preprocessing function.

</Tip>

The script also creates a configuration for the 🤗 PEFT method you're using, which in this case, is LoRA. The [`LoraConfig`] specifies the task type and important parameters such as the dimension of the low-rank matrices, the matrices scaling factor, and the dropout probability of the LoRA layers. If you want to use a different 🤗 PEFT method, make sure you replace `LoraConfig` with the appropriate [class](../package_reference/tuners).

```diff
 def main():
+    accelerator = Accelerator()
     model_name_or_path = "facebook/bart-large"
     dataset_name = "twitter_complaints"
+    peft_config = LoraConfig(
+        task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
+    )
```

Throughout the script, you'll see the [`~accelerate.Accelerator.main_process_first`] and [`~accelerate.Accelerator.wait_for_everyone`] functions, which help control and synchronize when processes are executed.
The [`get_peft_model`] function takes a base model and the `peft_config` you prepared earlier to create a [`PeftModel`]:

```diff
  model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
+ model = get_peft_model(model, peft_config)
```

Pass all the relevant training objects to 🤗 Accelerate's [`~accelerate.Accelerator.prepare`] which makes sure everything is ready for training:

```py
model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare(
    model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler
)
```

The next bit of code checks whether the DeepSpeed plugin is used in the `Accelerator`, and if the plugin exists, then the `Accelerator` uses ZeRO-3 as specified in the configuration file:

```py
is_ds_zero_3 = False
if getattr(accelerator.state, "deepspeed_plugin", None):
    is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3
```

Inside the training loop, the usual `loss.backward()` is replaced by 🤗 Accelerate's [`~accelerate.Accelerator.backward`] which uses the correct `backward()` method based on your configuration:

```diff
  for epoch in range(num_epochs):
      with TorchTracemalloc() as tracemalloc:
          model.train()
          total_loss = 0
          for step, batch in enumerate(tqdm(train_dataloader)):
              outputs = model(**batch)
              loss = outputs.loss
              total_loss += loss.detach().float()
+             accelerator.backward(loss)
              optimizer.step()
              lr_scheduler.step()
              optimizer.zero_grad()
```

That is all! The rest of the script handles the training loop, evaluation, and even pushes the model to the Hub for you.

## Train

Run the following command to launch the training script. Earlier, you saved the configuration file to `ds_zero3_cpu.yaml`, so you'll need to pass the path to the launcher with the `--config_file` argument like this:

```bash
accelerate launch --config_file ds_zero3_cpu.yaml examples/peft_lora_seq2seq_accelerate_ds_zero3_offload.py
```

You'll see some output logs that track memory usage during training, and once it's completed, the script returns the accuracy and compares the predictions to the labels:

```bash
GPU Memory before entering the train : 1916
GPU Memory consumed at the end of the train (end-begin): 66
GPU Peak Memory consumed during the train (max-begin): 7488
GPU Total Peak Memory consumed during the train (max): 9404
CPU Memory before entering the train : 19411
CPU Memory consumed at the end of the train (end-begin): 0
CPU Peak Memory consumed during the train (max-begin): 0
CPU Total Peak Memory consumed during the train (max): 19411
epoch=4: train_ppl=tensor(1.0705, device='cuda:0') train_epoch_loss=tensor(0.0681, device='cuda:0')
100%|████████████████████████████████████████████████████████████████████████████████████████████| 7/7 [00:27<00:00,  3.92s/it]
GPU Memory before entering the eval : 1982
GPU Memory consumed at the end of the eval (end-begin): -66
GPU Peak Memory consumed during the eval (max-begin): 672
GPU Total Peak Memory consumed during the eval (max): 2654
CPU Memory before entering the eval : 19411
CPU Memory consumed at the end of the eval (end-begin): 0
CPU Peak Memory consumed during the eval (max-begin): 0
CPU Total Peak Memory consumed during the eval (max): 19411
accuracy=100.0
eval_preds[:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint']
dataset['train'][label_column][:10]=['no complaint', 'no complaint', 'complaint', 'complaint', 'no complaint', 'no complaint', 'no complaint', 'complaint',
'complaint', 'no complaint']
```
hf_public_repos/peft/docs/source/accelerate/fsdp.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Fully Sharded Data Parallel

[Fully sharded data parallel](https://pytorch.org/docs/stable/fsdp.html) (FSDP) was developed for distributed training of large pretrained models up to 1T parameters. FSDP achieves this by sharding the model parameters, gradients, and optimizer states across data parallel processes, and it can also offload sharded model parameters to a CPU. The memory efficiency afforded by FSDP allows you to scale training to larger batch or model sizes.

<Tip warning={true}>

Currently, FSDP does not confer any reduction in GPU memory usage, and FSDP with CPU offload actually consumes 1.65x more GPU memory during training. You can track this PyTorch [issue](https://github.com/pytorch/pytorch/issues/91165) for any updates.

</Tip>

FSDP is supported in 🤗 Accelerate, and you can use it with 🤗 PEFT. This guide will help you learn how to use our FSDP [training script](https://github.com/huggingface/peft/blob/main/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py). You'll configure the script to train a large model for conditional generation.

## Configuration

Begin by running the following command to [create an FSDP configuration file](https://huggingface.co/docs/accelerate/main/en/usage_guides/fsdp) with 🤗 Accelerate. Use the `--config_file` flag to save the configuration file to a specific location, otherwise it is saved as a `default_config.yaml` file in the 🤗 Accelerate cache.

The configuration file is used to set the default options when you launch the training script.

```bash
accelerate config --config_file fsdp_config.yaml
```

You'll be asked a few questions about your setup, and to configure the following arguments. For this example, make sure you fully shard the model parameters, gradients, and optimizer states, leverage the CPU for offloading, and wrap model layers based on the Transformer layer class name.

```bash
`Sharding Strategy`: [1] FULL_SHARD (shards optimizer states, gradients and parameters), [2] SHARD_GRAD_OP (shards optimizer states and gradients), [3] NO_SHARD
`Offload Params`: Decides whether to offload parameters and gradients to CPU
`Auto Wrap Policy`: [1] TRANSFORMER_BASED_WRAP, [2] SIZE_BASED_WRAP, [3] NO_WRAP
`Transformer Layer Class to Wrap`: When using `TRANSFORMER_BASED_WRAP`, user specifies a comma-separated string of transformer layer class names (case-sensitive) to wrap, e.g., `BertLayer`, `GPTJBlock`, `T5Block`, `BertLayer,BertEmbeddings,BertSelfOutput`...
`Min Num Params`: minimum number of parameters when using `SIZE_BASED_WRAP`
`Backward Prefetch`: [1] BACKWARD_PRE, [2] BACKWARD_POST, [3] NO_PREFETCH
`State Dict Type`: [1] FULL_STATE_DICT, [2] LOCAL_STATE_DICT, [3] SHARDED_STATE_DICT
```

For example, your FSDP configuration file may look like the following:

```yaml
command_file: null
commands: null
compute_environment: LOCAL_MACHINE
deepspeed_config: {}
distributed_type: FSDP
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config:
  fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
  fsdp_backward_prefetch_policy: BACKWARD_PRE
  fsdp_offload_params: true
  fsdp_sharding_strategy: 1
  fsdp_state_dict_type: FULL_STATE_DICT
  fsdp_transformer_layer_cls_to_wrap: T5Block
gpu_ids: null
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
megatron_lm_config: {}
mixed_precision: 'no'
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_name: null
tpu_zone: null
use_cpu: false
```

## The important parts

Let's dig a bit deeper into the training script to understand how it works.

The [`main()`](https://github.com/huggingface/peft/blob/2822398fbe896f25d4dac5e468624dc5fd65a51b/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py#L14) function begins with initializing an [`~accelerate.Accelerator`] class which handles everything for distributed training, such as automatically detecting your training environment.

<Tip>

💡 Feel free to change the model and dataset inside the `main` function. If your dataset format is different from the one in the script, you may also need to write your own preprocessing function.

</Tip>

The script also creates a configuration corresponding to the 🤗 PEFT method you're using. For LoRA, you'll use [`LoraConfig`] to specify the task type, and several other important parameters such as the dimension of the low-rank matrices, the matrices scaling factor, and the dropout probability of the LoRA layers. If you want to use a different 🤗 PEFT method, replace `LoraConfig` with the appropriate [class](../package_reference/tuners).

Next, the script wraps the base model and `peft_config` with the [`get_peft_model`] function to create a [`PeftModel`].

```diff
 def main():
+    accelerator = Accelerator()
     model_name_or_path = "t5-base"
     base_path = "temp/data/FinancialPhraseBank-v1.0"
+    peft_config = LoraConfig(
+        task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
+    )
     model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
+    model = get_peft_model(model, peft_config)
```

Throughout the script, you'll see the [`~accelerate.Accelerator.main_process_first`] and [`~accelerate.Accelerator.wait_for_everyone`] functions, which help control and synchronize when processes are executed.

After your dataset is prepared, and all the necessary training components are loaded, the script checks if you're using the `fsdp_plugin`. PyTorch offers two ways for wrapping model layers in FSDP, automatically or manually. The simplest method is to allow FSDP to automatically recursively wrap model layers without changing any other code. You can choose to wrap the model layers based on the layer name or on the size (number of parameters). In the FSDP configuration file, it uses the `TRANSFORMER_BASED_WRAP` option to wrap the [`T5Block`] layer.
```py
if getattr(accelerator.state, "fsdp_plugin", None) is not None:
    accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model)
```

Next, use 🤗 Accelerate's [`~accelerate.Accelerator.prepare`] function to prepare the model, datasets, optimizer, and scheduler for training.

```py
model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare(
    model, train_dataloader, eval_dataloader, optimizer, lr_scheduler
)
```

From here, the remainder of the script handles the training loop, evaluation, and sharing your model to the Hub.

## Train

Run the following command to launch the training script. Earlier, you saved the configuration file to `fsdp_config.yaml`, so you'll need to pass the path to the launcher with the `--config_file` argument like this:

```bash
accelerate launch --config_file fsdp_config.yaml examples/peft_lora_seq2seq_accelerate_fsdp.py
```

Once training is complete, the script returns the accuracy and compares the predictions to the labels.
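Once training finishes, the saved adapter can be loaded back on top of the base model like any other PEFT checkpoint (a minimal sketch; the adapter path is illustrative):

```python
from transformers import AutoModelForSeq2SeqLM
from peft import PeftModel

# Load the frozen base model, then attach the trained LoRA adapter to it.
base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
model = PeftModel.from_pretrained(base_model, "path/to/your/saved/adapter")
```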
hf_public_repos/peft/docs/source/tutorial/peft_integrations.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# PEFT integrations

PEFT's practical benefits extend to other Hugging Face libraries like [Diffusers](https://hf.co/docs/diffusers) and [Transformers](https://hf.co/docs/transformers). One of the main benefits of PEFT is that an adapter file generated by a PEFT method is a lot smaller than the original model, which makes it super easy to manage and use multiple adapters. You can use one pretrained base model for multiple tasks by simply loading a new adapter finetuned for the task you're solving. Or you can combine multiple adapters with a text-to-image diffusion model to create new effects.

This tutorial will show you how PEFT can help you manage adapters in Diffusers and Transformers.

## Diffusers

Diffusers is a generative AI library for creating images and videos from text or images with diffusion models. LoRA is an especially popular training method for diffusion models because you can very quickly train and share diffusion models to generate images in new styles. To make it easier to use and try multiple LoRA models, Diffusers uses the PEFT library to help manage different adapters for inference.

For example, load a base model and then load the [artificialguybr/3DRedmond-V1](https://huggingface.co/artificialguybr/3DRedmond-V1) adapter for inference with the [`load_lora_weights`](https://huggingface.co/docs/diffusers/v0.24.0/en/api/loaders/lora#diffusers.loaders.LoraLoaderMixin.load_lora_weights) method. The `adapter_name` argument in the loading method is enabled by PEFT and allows you to set a name for the adapter so it is easier to reference.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "peft-internal-testing/artificialguybr__3DRedmond-V1",
    weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors",
    adapter_name="3d"
)
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers.png"/>
</div>

Now let's try another cool LoRA model, [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora). All you need to do is load and name this new adapter with `adapter_name`, and use the [`set_adapters`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.set_adapters) method to set it as the currently active adapter.
```py
pipeline.load_lora_weights(
    "ostris/super-cereal-sdxl-lora",
    weight_name="cereal_box_sdxl_v1.safetensors",
    adapter_name="cereal"
)
pipeline.set_adapters("cereal")
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers-2.png"/>
</div>

Finally, you can call the [`disable_lora`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.disable_lora) method to restore the base model.

```py
pipeline.disable_lora()
```

Learn more about how PEFT supports Diffusers in the [Inference with PEFT](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference) tutorial.

## Transformers

Transformers is a collection of pretrained models for all types of tasks in all modalities. You can load these models for training or inference. Many of the models are large language models (LLMs), so it makes sense to integrate PEFT with Transformers to manage and train adapters.

Load a base pretrained model to train.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```

Next, add an adapter configuration to specify how to adapt the model parameters. Call the [`~PeftModel.add_adapter`] method to add the configuration to the base model.

```py
from peft import LoraConfig

config = LoraConfig(
    lora_alpha=16,
    lora_dropout=0.1,
    r=64,
    bias="none",
    task_type="CAUSAL_LM"
)
model.add_adapter(config)
```

Now you can train the model with Transformers' [`~transformers.Trainer`] class or whichever training framework you prefer.

To use the newly trained model for inference, the [`~transformers.AutoModel`] class uses PEFT on the backend to load the adapter weights and configuration file into a base pretrained model.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```

If you're interested in comparing or using more than one adapter, you can also call the [`~PeftModel.add_adapter`] method to add the adapter configuration to the base model. The only requirement is that the adapter type must be the same (you can't mix a LoRA and LoHa adapter).

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
# two example LoRA configurations
lora_config_1 = LoraConfig(r=16, task_type="CAUSAL_LM")
lora_config_2 = LoraConfig(r=64, task_type="CAUSAL_LM")
model.add_adapter(lora_config_1, adapter_name="adapter_1")
```

Call [`~PeftModel.add_adapter`] again to attach a new adapter to the base model.

```py
model.add_adapter(lora_config_2, adapter_name="adapter_2")
```

Then you can use [`~PeftModel.set_adapter`] to set the currently active adapter.

```py
model.set_adapter("adapter_1")
output = model.generate(**inputs)  # assumes `inputs` was created with the model's tokenizer
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

To disable the adapter, call the `disable_adapters` method.

```py
model.disable_adapters()
```

If you're curious, check out the [Load and train adapters with PEFT](https://huggingface.co/docs/transformers/main/peft) tutorial to learn more.
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# PEFT configurations and models

The sheer size of today's large pretrained models - which commonly have billions of parameters - presents a significant training challenge because they require more storage space and more computational power to crunch all those calculations. You'll need access to powerful GPUs or TPUs to train these large pretrained models, which is expensive, not widely accessible to everyone, not environmentally friendly, and not very practical. PEFT methods address many of these challenges. There are several types of PEFT methods (soft prompting, matrix decomposition, adapters), but they all focus on the same thing: reducing the number of trainable parameters. This makes it more accessible to train and store large models on consumer hardware.

The PEFT library is designed to help you quickly train large models on free or low-cost GPUs, and in this tutorial, you'll learn how to set up a configuration to apply a PEFT method to a pretrained base model for training. Once the PEFT configuration is set up, you can use any training framework you like (Transformers' [`~transformers.Trainer`] class, [Accelerate](https://hf.co/docs/accelerate), a custom PyTorch training loop).

## PEFT configurations

<Tip>

Learn more about the parameters you can configure for each PEFT method in their respective API reference page.

</Tip>

A configuration stores important parameters that specify how a particular PEFT method should be applied. For example, take a look at the following [`LoraConfig`](https://huggingface.co/ybelkada/opt-350m-lora/blob/main/adapter_config.json) for applying LoRA and [`PromptEncoderConfig`](https://huggingface.co/smangrul/roberta-large-peft-p-tuning/blob/main/adapter_config.json) for applying p-tuning (these configuration files are already JSON-serialized). Whenever you load a PEFT adapter, it is a good idea to check whether it has an associated adapter_config.json file, which is required.

<hfoptions id="config">
<hfoption id="LoraConfig">

```json
{
  "base_model_name_or_path": "facebook/opt-350m", #base model to apply LoRA to
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA", #PEFT method type
  "r": 16,
  "revision": null,
  "target_modules": [
      "q_proj", #model modules to apply LoRA to (query and value projection layers)
      "v_proj"
  ],
  "task_type": "CAUSAL_LM" #type of task to train model on
}
```

You can create your own configuration for training by initializing a [`LoraConfig`].
```py
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    r=16,
    target_modules=["q_proj", "v_proj"],
    task_type=TaskType.CAUSAL_LM,
    lora_alpha=32,
    lora_dropout=0.05
)
```

</hfoption>
<hfoption id="PromptEncoderConfig">

```json
{
  "base_model_name_or_path": "roberta-large", #base model to apply p-tuning to
  "encoder_dropout": 0.0,
  "encoder_hidden_size": 128,
  "encoder_num_layers": 2,
  "encoder_reparameterization_type": "MLP",
  "inference_mode": true,
  "num_attention_heads": 16,
  "num_layers": 24,
  "num_transformer_submodules": 1,
  "num_virtual_tokens": 20,
  "peft_type": "P_TUNING", #PEFT method type
  "task_type": "SEQ_CLS", #type of task to train model on
  "token_dim": 1024
}
```

You can create your own configuration for training by initializing a [`PromptEncoderConfig`].

```py
from peft import PromptEncoderConfig, TaskType

p_tuning_config = PromptEncoderConfig(
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=128,
    num_attention_heads=16,
    num_layers=24,
    num_transformer_submodules=1,
    num_virtual_tokens=20,
    token_dim=1024,
    task_type=TaskType.SEQ_CLS
)
```

</hfoption>
</hfoptions>

## PEFT models

With a PEFT configuration in hand, you can now apply it to any pretrained model to create a [`PeftModel`]. Choose from any of the state-of-the-art models from the [Transformers](https://hf.co/docs/transformers) library, a custom model, and even new and unsupported transformer architectures.

For this tutorial, load a base [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model to finetune.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```

Use the [`get_peft_model`] function to create a [`PeftModel`] from the base facebook/opt-350m model and the `lora_config` you created earlier.

```py
from peft import get_peft_model

lora_model = get_peft_model(model, lora_config)
lora_model.print_trainable_parameters()
"trainable params: 1,572,864 || all params: 332,769,280 || trainable%: 0.472659014678278"
```

Now you can train the [`PeftModel`] with your preferred training framework! After training, you can save your model locally with [`~PeftModel.save_pretrained`] or upload it to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method.

```py
# save locally
lora_model.save_pretrained("your-name/opt-350m-lora")

# push to Hub
lora_model.push_to_hub("your-name/opt-350m-lora")
```

To load a [`PeftModel`] for inference, you'll need to provide the [`PeftConfig`] used to create it and the base model it was trained from.

```py
from peft import PeftModel, PeftConfig

config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora")
```

<Tip>

By default, the [`PeftModel`] is set for inference, but if you'd like to train the adapter some more you can set `is_trainable=True`.

```py
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora", is_trainable=True)
```

</Tip>

The [`PeftModel.from_pretrained`] method is the most flexible way to load a [`PeftModel`] because it doesn't matter what model framework was used (Transformers, timm, a generic PyTorch model). Other classes, like [`AutoPeftModel`], are just convenient wrappers around the base [`PeftModel`] and make it easier to load PEFT models directly from the Hub or locally where the PEFT weights are stored.
```py
from peft import AutoPeftModelForCausalLM

lora_model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```

Take a look at the [AutoPeftModel](package_reference/auto_class) API reference to learn more about the [`AutoPeftModel`] classes.

## Next steps

Once you have the appropriate [`PeftConfig`], you can apply it to any pretrained model to create a [`PeftModel`] and train large, powerful models faster on freely available GPUs! To learn more about PEFT configurations and models, the following guide may be helpful:

* Learn how to configure a PEFT method for models that aren't from Transformers in the [Working with custom models](../developer_guides/custom_models) guide.
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Models

[`PeftModel`] is the base model class for specifying the base Transformer model and configuration to apply a PEFT method to. The base `PeftModel` contains methods for loading and saving models from the Hub.

## PeftModel

[[autodoc]] PeftModel
    - all

## PeftModelForSequenceClassification

A `PeftModel` for sequence classification tasks.

[[autodoc]] PeftModelForSequenceClassification
    - all

## PeftModelForTokenClassification

A `PeftModel` for token classification tasks.

[[autodoc]] PeftModelForTokenClassification
    - all

## PeftModelForCausalLM

A `PeftModel` for causal language modeling.

[[autodoc]] PeftModelForCausalLM
    - all

## PeftModelForSeq2SeqLM

A `PeftModel` for sequence-to-sequence language modeling.

[[autodoc]] PeftModelForSeq2SeqLM
    - all

## PeftModelForQuestionAnswering

A `PeftModel` for question answering.

[[autodoc]] PeftModelForQuestionAnswering
    - all

## PeftModelForFeatureExtraction

A `PeftModel` for extracting features/embeddings from transformer models.

[[autodoc]] PeftModelForFeatureExtraction
    - all

## Utilities

[[autodoc]] get_peft_model
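To make the loading and saving workflow concrete, here is a minimal sketch (the model IDs are examples reused from the tutorials in these docs; any base model and matching adapter pair works):

```py
from transformers import AutoModelForCausalLM
from peft import PeftModel

# load the base model first, then attach the trained adapter from the Hub
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
peft_model = PeftModel.from_pretrained(base_model, "ybelkada/opt-350m-lora")

# save the adapter weights and adapter_config.json to a local directory
peft_model.save_pretrained("opt-350m-lora-local")
```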
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Llama-Adapter

[Llama-Adapter](https://hf.co/papers/2303.16199) is a PEFT method specifically designed for turning Llama into an instruction-following model. The Llama model is frozen and only a set of adaptation prompts prefixed to the input instruction tokens is learned. Since randomly initialized modules inserted into the model can cause the model to lose some of its existing knowledge, Llama-Adapter uses zero-initialized attention with zero gating to progressively add the instructional prompts to the model.

The abstract from the paper is:

*We present LLaMA-Adapter, a lightweight adaption method to efficiently fine-tune LLaMA into an instruction-following model. Using 52K self-instruct demonstrations, LLaMA-Adapter only introduces 1.2M learnable parameters upon the frozen LLaMA 7B model, and costs less than one hour for fine-tuning on 8 A100 GPUs. Specifically, we adopt a set of learnable adaption prompts, and prepend them to the input text tokens at higher transformer layers. Then, a zero-init attention mechanism with zero gating is proposed, which adaptively injects the new instructional cues into LLaMA, while effectively preserves its pre-trained knowledge. With efficient training, LLaMA-Adapter generates high-quality responses, comparable to Alpaca with fully fine-tuned 7B parameters. Furthermore, our approach can be simply extended to multi-modal input, e.g., images, for image-conditioned LLaMA, which achieves superior reasoning capacity on ScienceQA. We release our code at https://github.com/ZrrSkywalker/LLaMA-Adapter*.

## AdaptionPromptConfig

[[autodoc]] tuners.adaption_prompt.config.AdaptionPromptConfig

## AdaptionPromptModel

[[autodoc]] tuners.adaption_prompt.model.AdaptionPromptModel
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# IA3

Infused Adapter by Inhibiting and Amplifying Inner Activations, or [IA3](https://hf.co/papers/2205.05638), is a method that adds three learned vectors to rescale the keys and values of the self-attention and encoder-decoder attention layers, and the intermediate activation of the position-wise feed-forward network.

The abstract from the paper is:

*Few-shot in-context learning (ICL) enables pre-trained language models to perform a previously-unseen task without any gradient-based training by feeding a small number of training examples as part of the input. ICL incurs substantial computational, memory, and storage costs because it involves processing all of the training examples every time a prediction is made. Parameter-efficient fine-tuning (PEFT) (e.g. adapter modules, prompt tuning, sparse update methods, etc.) offers an alternative paradigm where a small set of parameters are trained to enable a model to perform the new task. In this paper, we rigorously compare few-shot ICL and PEFT and demonstrate that the latter offers better accuracy as well as dramatically lower computational costs. Along the way, we introduce a new PEFT method called (IA)^3 that scales activations by learned vectors, attaining stronger performance while only introducing a relatively tiny amount of new parameters. We also propose a simple recipe based on the T0 model called T-Few that can be applied to new tasks without task-specific tuning or modifications. We validate the effectiveness of T-Few on completely unseen tasks by applying it to the RAFT benchmark, attaining super-human performance for the first time and outperforming the state-of-the-art by 6% absolute. All of the code used in our experiments is publicly available*.

## IA3Config

[[autodoc]] tuners.ia3.config.IA3Config

## IA3Model

[[autodoc]] tuners.ia3.model.IA3Model
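As a rough usage sketch, the config below applies the learned (IA)³ vectors to attention and feed-forward projections. The module names are assumptions that depend on the model architecture, and `feedforward_modules` must be a subset of `target_modules`:

```py
from peft import IA3Config, TaskType, get_peft_model

# the module names below are placeholders; check your model's named_modules() for the real ones
config = IA3Config(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["k_proj", "v_proj", "down_proj"],
    feedforward_modules=["down_proj"],  # must be a subset of target_modules
)
peft_model = get_peft_model(model, config)  # `model` is assumed to be a loaded transformers model
```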
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# AdaLoRA

[AdaLoRA](https://hf.co/papers/2303.10512) is a method for optimizing the number of trainable parameters to assign to weight matrices and layers, unlike LoRA, which distributes parameters evenly across all modules. More parameters are budgeted for important weight matrices and layers while less important ones receive fewer parameters.

The abstract from the paper is:

*Fine-tuning large pre-trained language models on downstream tasks has become an important paradigm in NLP. However, common practice fine-tunes all of the parameters in a pre-trained model, which becomes prohibitive when a large number of downstream tasks are present. Therefore, many fine-tuning methods are proposed to learn incremental updates of pre-trained weights in a parameter efficient way, e.g., low-rank increments. These methods often evenly distribute the budget of incremental updates across all pre-trained weight matrices, and overlook the varying importance of different weight parameters. As a consequence, the fine-tuning performance is suboptimal. To bridge this gap, we propose AdaLoRA, which adaptively allocates the parameter budget among weight matrices according to their importance score. In particular, AdaLoRA parameterizes the incremental updates in the form of singular value decomposition. Such a novel approach allows us to effectively prune the singular values of unimportant updates, which is essentially to reduce their parameter budget but circumvent intensive exact SVD computations. We conduct extensive experiments with several pre-trained models on natural language processing, question answering, and natural language generation to validate the effectiveness of AdaLoRA. Results demonstrate that AdaLoRA manifests notable improvement over baselines, especially in the low budget settings. Our code is publicly available at https://github.com/QingruZhang/AdaLoRA*.

## AdaLoraConfig

[[autodoc]] tuners.adalora.config.AdaLoraConfig

## AdaLoraModel

[[autodoc]] tuners.adalora.model.AdaLoraModel
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Prompt tuning

[Prompt tuning](https://hf.co/papers/2104.08691) adds task-specific prompts to the input, and these prompt parameters are updated independently of the pretrained model parameters, which are frozen.

The abstract from the paper is:

*In this work, we explore "prompt tuning", a simple yet effective mechanism for learning "soft prompts" to condition frozen language models to perform specific downstream tasks. Unlike the discrete text prompts used by GPT-3, soft prompts are learned through backpropagation and can be tuned to incorporate signal from any number of labeled examples. Our end-to-end learned approach outperforms GPT-3's "few-shot" learning by a large margin. More remarkably, through ablations on model size using T5, we show that prompt tuning becomes more competitive with scale: as models exceed billions of parameters, our method "closes the gap" and matches the strong performance of model tuning (where all model weights are tuned). This finding is especially relevant in that large models are costly to share and serve, and the ability to reuse one frozen model for multiple downstream tasks can ease this burden. Our method can be seen as a simplification of the recently proposed "prefix tuning" of Li and Liang (2021), and we provide a comparison to this and other similar approaches. Finally, we show that conditioning a frozen model with soft prompts confers benefits in robustness to domain transfer, as compared to full model tuning*.

## PromptTuningConfig

[[autodoc]] tuners.prompt_tuning.config.PromptTuningConfig

## PromptEmbedding

[[autodoc]] tuners.prompt_tuning.model.PromptEmbedding
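As a minimal sketch of how this is configured in PEFT (the virtual token count below is an arbitrary example):

```py
from peft import PromptTuningConfig, TaskType, get_peft_model

# 10 trainable virtual tokens are prepended to the input; the base model stays frozen
config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=10)
peft_model = get_peft_model(model, config)  # `model` is assumed to be a loaded transformers model
```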
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Tuners

A tuner (or adapter) is a module that can be plugged into a `torch.nn.Module`. [`BaseTuner`] is the base class for other tuners and provides shared methods and attributes for preparing an adapter configuration and replacing a target module with the adapter module. [`BaseTunerLayer`] is a base class for adapter layers. It offers methods and attributes for managing adapters such as activating and disabling adapters.

## BaseTuner

[[autodoc]] tuners.tuners_utils.BaseTuner

## BaseTunerLayer

[[autodoc]] tuners.tuners_utils.BaseTunerLayer
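Because every adapter layer inherits from [`BaseTunerLayer`], one practical consequence is that you can discover the injected adapter layers of any PEFT model generically. A small sketch, assuming `peft_model` is an existing PEFT model:

```py
from peft.tuners.tuners_utils import BaseTunerLayer

# collect the names of all modules that a tuner has injected into the model
adapter_layers = [
    name for name, module in peft_model.named_modules() if isinstance(module, BaseTunerLayer)
]
print(f"found {len(adapter_layers)} adapter layers")
```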
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Multitask Prompt Tuning

[Multitask Prompt Tuning](https://huggingface.co/papers/2303.02861) decomposes the soft prompts of each task into a single learned transferable prompt instead of a separate prompt for each task. The single learned prompt can be adapted for each task by multiplicative low rank updates.

The abstract from the paper is:

*Prompt tuning, in which a base pretrained model is adapted to each task via conditioning on learned prompt vectors, has emerged as a promising approach for efficiently adapting large language models to multiple downstream tasks. However, existing methods typically learn soft prompt vectors from scratch, and it has not been clear how to exploit the rich cross-task knowledge with prompt vectors in a multitask learning setting. We propose multitask prompt tuning (MPT), which first learns a single transferable prompt by distilling knowledge from multiple task-specific source prompts. We then learn multiplicative low rank updates to this shared prompt to efficiently adapt it to each downstream target task. Extensive experiments on 23 NLP datasets demonstrate that our proposed approach outperforms the state-of-the-art methods, including the full finetuning baseline in some cases, despite only tuning 0.035% as many task-specific parameters*.

## MultitaskPromptTuningConfig

[[autodoc]] tuners.multitask_prompt_tuning.config.MultitaskPromptTuningConfig

## MultitaskPromptEmbedding

[[autodoc]] tuners.multitask_prompt_tuning.model.MultitaskPromptEmbedding
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# LoRA

Low-Rank Adaptation ([LoRA](https://huggingface.co/papers/2106.09685)) is a PEFT method that decomposes a large matrix into two smaller low-rank matrices in the attention layers. This drastically reduces the number of parameters that need to be fine-tuned.

The abstract below comes from [a follow-up paper](https://huggingface.co/papers/2309.15223) that applies LoRA to speech recognition rescoring:

*We propose a neural language modeling system based on low-rank adaptation (LoRA) for speech recognition output rescoring. Although pretrained language models (LMs) like BERT have shown superior performance in second-pass rescoring, the high computational cost of scaling up the pretraining stage and adapting the pretrained models to specific domains limit their practical use in rescoring. Here we present a method based on low-rank decomposition to train a rescoring BERT model and adapt it to new domains using only a fraction (0.08%) of the pretrained parameters. These inserted matrices are optimized through a discriminative training objective along with a correlation-based regularization loss. The proposed low-rank adaptation Rescore-BERT (LoRB) architecture is evaluated on LibriSpeech and internal datasets with decreased training times by factors between 5.4 and 3.6.*.

## LoraConfig

[[autodoc]] tuners.lora.config.LoraConfig

## LoraModel

[[autodoc]] tuners.lora.model.LoraModel
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Configuration

[`PeftConfigMixin`] is the base configuration class for storing the adapter configuration of a [`PeftModel`], and [`PromptLearningConfig`] is the base configuration class for soft prompt methods (p-tuning, prefix tuning, and prompt tuning). These base classes contain methods for saving and loading model configurations from the Hub, specifying the PEFT method to use, the type of task to perform, and model configurations like number of layers and number of attention heads.

## PeftConfigMixin

[[autodoc]] config.PeftConfigMixin
    - all

## PeftConfig

[[autodoc]] PeftConfig
    - all

## PromptLearningConfig

[[autodoc]] PromptLearningConfig
    - all
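For a quick illustration of the Hub methods these classes provide, the sketch below loads the adapter configuration referenced in the tutorials of these docs and round-trips it to disk:

```py
from peft import PeftConfig

# downloads and parses the adapter_config.json of the adapter repo
config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
print(config.peft_type, config.task_type)

# write the configuration back out as adapter_config.json
config.save_pretrained("opt-350m-lora-config")
```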
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# LoHa

Low-Rank Hadamard Product ([LoHa](https://huggingface.co/papers/2108.06098)) is similar to LoRA except it approximates the large weight matrix with more low-rank matrices and combines them with the Hadamard product. This method is even more parameter-efficient than LoRA and achieves comparable performance.

The abstract from the paper is:

*In this work, we propose a communication-efficient parameterization, FedPara, for federated learning (FL) to overcome the burdens on frequent model uploads and downloads. Our method re-parameterizes weight parameters of layers using low-rank weights followed by the Hadamard product. Compared to the conventional low-rank parameterization, our FedPara method is not restricted to low-rank constraints, and thereby it has a far larger capacity. This property enables to achieve comparable performance while requiring 3 to 10 times lower communication costs than the model with the original layers, which is not achievable by the traditional low-rank methods. The efficiency of our method can be further improved by combining with other efficient FL optimizers. In addition, we extend our method to a personalized FL application, pFedPara, which separates parameters into global and local ones. We show that pFedPara outperforms competing personalized FL methods with more than three times fewer parameters*.

## LoHaConfig

[[autodoc]] tuners.loha.config.LoHaConfig

## LoHaModel

[[autodoc]] tuners.loha.model.LoHaModel
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# AutoPeftModels

The `AutoPeftModel` classes load the appropriate PEFT model for the task type by automatically inferring it from the configuration file. They are designed to quickly and easily load a PEFT model in a single line of code without having to worry about which exact model class you need or manually loading a [`PeftConfig`].

## AutoPeftModel

[[autodoc]] auto.AutoPeftModel
    - from_pretrained

## AutoPeftModelForCausalLM

[[autodoc]] auto.AutoPeftModelForCausalLM

## AutoPeftModelForSeq2SeqLM

[[autodoc]] auto.AutoPeftModelForSeq2SeqLM

## AutoPeftModelForSequenceClassification

[[autodoc]] auto.AutoPeftModelForSequenceClassification

## AutoPeftModelForTokenClassification

[[autodoc]] auto.AutoPeftModelForTokenClassification

## AutoPeftModelForQuestionAnswering

[[autodoc]] auto.AutoPeftModelForQuestionAnswering

## AutoPeftModelForFeatureExtraction

[[autodoc]] auto.AutoPeftModelForFeatureExtraction
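For example, the task-appropriate class below resolves both the base model and the adapter from the adapter's configuration file, so a single call is enough:

```py
from peft import AutoPeftModelForCausalLM

# the base model ID and task type are read from the adapter's config on the Hub
model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```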
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# P-tuning

[P-tuning](https://hf.co/papers/2103.10385) adds trainable prompt embeddings to the input that are optimized by a prompt encoder to find a better prompt, eliminating the need to manually design prompts. The prompt tokens can be added anywhere in the input sequence, and p-tuning also introduces anchor tokens for improving performance.

The abstract from the paper is:

*While GPTs with traditional fine-tuning fail to achieve strong results on natural language understanding (NLU), we show that GPTs can be better than or comparable to similar-sized BERTs on NLU tasks with a novel method P-tuning -- which employs trainable continuous prompt embeddings. On the knowledge probing (LAMA) benchmark, the best GPT recovers 64\% (P@1) of world knowledge without any additional text provided during test time, which substantially improves the previous best by 20+ percentage points. On the SuperGlue benchmark, GPTs achieve comparable and sometimes better performance to similar-sized BERTs in supervised learning. Importantly, we find that P-tuning also improves BERTs' performance in both few-shot and supervised settings while largely reducing the need for prompt engineering. Consequently, P-tuning outperforms the state-of-the-art approaches on the few-shot SuperGlue benchmark.*.

## PromptEncoderConfig

[[autodoc]] tuners.p_tuning.config.PromptEncoderConfig

## PromptEncoder

[[autodoc]] tuners.p_tuning.model.PromptEncoder
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# PEFT types

[`PeftType`] includes the supported adapters in PEFT, and [`TaskType`] includes PEFT-supported tasks.

## PeftType

[[autodoc]] utils.peft_types.PeftType

## TaskType

[[autodoc]] utils.peft_types.TaskType
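Both enums are typically used indirectly through the config classes; a minimal sketch:

```py
from peft import LoraConfig, PeftType, TaskType

config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM)
print(config.peft_type == PeftType.LORA)  # the config class sets its PeftType automatically
```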
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# OFT

[Orthogonal Finetuning (OFT)](https://hf.co/papers/2306.07280) is a method developed for adapting text-to-image diffusion models. It works by reparameterizing the pretrained weight matrices with an orthogonal matrix to preserve information in the pretrained model. To reduce the number of parameters, OFT introduces a block-diagonal structure in the orthogonal matrix.

The abstract from the paper is:

*Large text-to-image diffusion models have impressive capabilities in generating photorealistic images from text prompts. How to effectively guide or control these powerful models to perform different downstream tasks becomes an important open problem. To tackle this challenge, we introduce a principled finetuning method -- Orthogonal Finetuning (OFT), for adapting text-to-image diffusion models to downstream tasks. Unlike existing methods, OFT can provably preserve hyperspherical energy which characterizes the pairwise neuron relationship on the unit hypersphere. We find that this property is crucial for preserving the semantic generation ability of text-to-image diffusion models. To improve finetuning stability, we further propose Constrained Orthogonal Finetuning (COFT) which imposes an additional radius constraint to the hypersphere. Specifically, we consider two important finetuning text-to-image tasks: subject-driven generation where the goal is to generate subject-specific images given a few images of a subject and a text prompt, and controllable generation where the goal is to enable the model to take in additional control signals. We empirically show that our OFT framework outperforms existing methods in generation quality and convergence speed*.

## OFTConfig

[[autodoc]] tuners.oft.config.OFTConfig

## OFTModel

[[autodoc]] tuners.oft.model.OFTModel
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# LyCORIS

[LyCORIS](https://hf.co/papers/2309.14859) (Lora beYond Conventional methods, Other Rank adaptation Implementations for Stable diffusion) are LoRA-like matrix decomposition adapters that modify the cross-attention layer of the UNet. The [LoHa](loha) and [LoKr](lokr) methods inherit from the `Lycoris` classes here.

## LycorisConfig

[[autodoc]] tuners.lycoris_utils.LycorisConfig

## LycorisLayer

[[autodoc]] tuners.lycoris_utils.LycorisLayer

## LycorisTuner

[[autodoc]] tuners.lycoris_utils.LycorisTuner
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Prefix tuning

[Prefix tuning](https://hf.co/papers/2101.00190) prefixes a series of task-specific vectors to the input sequence that can be learned while keeping the pretrained model frozen. The prefix parameters are inserted in all of the model layers.

The abstract from the paper is:

*Fine-tuning is the de facto way to leverage large pretrained language models to perform downstream tasks. However, it modifies all the language model parameters and therefore necessitates storing a full copy for each task. In this paper, we propose prefix-tuning, a lightweight alternative to fine-tuning for natural language generation tasks, which keeps language model parameters frozen, but optimizes a small continuous task-specific vector (called the prefix). Prefix-tuning draws inspiration from prompting, allowing subsequent tokens to attend to this prefix as if it were "virtual tokens". We apply prefix-tuning to GPT-2 for table-to-text generation and to BART for summarization. We find that by learning only 0.1\% of the parameters, prefix-tuning obtains comparable performance in the full data setting, outperforms fine-tuning in low-data settings, and extrapolates better to examples with topics unseen during training*.

## PrefixTuningConfig

[[autodoc]] tuners.prefix_tuning.config.PrefixTuningConfig

## PrefixEncoder

[[autodoc]] tuners.prefix_tuning.model.PrefixEncoder
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# LoKr

Low-Rank Kronecker Product ([LoKr](https://hf.co/papers/2309.14859)) is a LoRA-variant method that approximates the large weight matrix with two low-rank matrices and combines them with the Kronecker product. LoKr also provides an optional third low-rank matrix for better control during fine-tuning.

## LoKrConfig

[[autodoc]] tuners.lokr.config.LoKrConfig

## LoKrModel

[[autodoc]] tuners.lokr.model.LoKrModel
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Working with mixed adapter types

Normally, it is not possible to mix different adapter types in 🤗 PEFT. For example, even though it is possible to create a PEFT model that has two different LoRA adapters (that can have different config options), it is not possible to combine a LoRA adapter with a LoHa adapter. A mixed model makes this possible, however, as long as the adapter types are compatible.

## Loading different adapter types into a PEFT model

To load different adapter types into a PEFT model, proceed the same as if you were loading two adapters of the same type, but use `PeftMixedModel` instead of `PeftModel`:

```py
from peft import PeftMixedModel

base_model = ...  # load the base model, e.g. from transformers
# load first adapter, which will be called "default"
peft_model = PeftMixedModel.from_pretrained(base_model, <path_to_adapter1>)
peft_model.load_adapter(<path_to_adapter2>, adapter_name="other")
peft_model.set_adapter(["default", "other"])
```

The last line is necessary if you want to activate both adapters, otherwise, only the first adapter would be active. Of course, you can add more different adapters by calling `add_adapter` repeatedly, as shown in the sketch after the tips below.

Currently, the main purpose of mixed adapter types is to combine trained adapters for inference. Although it is technically also possible to train a mixed adapter model, this has not been tested and is not recommended.

## Tips

- Not all adapter types can be combined. See `peft.tuners.mixed.COMPATIBLE_TUNER_TYPES` for a list of compatible types. An error will be raised if you are trying to combine incompatible adapter types.
- It is possible to mix multiple adapters of the same type. This can be useful to combine adapters with very different configs.
- If you want to combine a lot of different adapters, it is most performant to add the same types of adapters consecutively. E.g., add LoRA1, LoRA2, LoHa1, LoHa2 in this order, instead of LoRA1, LoHa1, LoRA2, LoHa2. The order will make a difference for the outcome in most cases, but since no order is better a priori, it is best to choose the order that is most performant.
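To round this out, here is a sketch of adding a third, in-memory adapter of a different type to the model loaded above (the adapter name, config values, and target module names are placeholders, not recommendations):

```py
from peft import LoHaConfig

# hypothetical LoHa adapter created on the fly instead of loaded from disk
loha_config = LoHaConfig(target_modules=["q_proj", "v_proj"])
peft_model.add_adapter("loha", loha_config)
peft_model.set_adapter(["default", "other", "loha"])
```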
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Troubleshooting

If you encounter any issue when using PEFT, please check the following list of common issues and their solutions.

## Examples don't work

Examples often rely on the most recent package versions, so please ensure they're up-to-date. In particular, check the version of the following packages:

- `peft`
- `transformers`
- `accelerate`
- `torch`

In general, you can update the package version by running this command inside your Python environment:

```bash
python -m pip install -U <package_name>
```

Installing PEFT from source is useful for keeping up with the latest developments:

```bash
python -m pip install git+https://github.com/huggingface/peft
```

## Bad results from a loaded PEFT model

There can be several reasons for getting a poor result from a loaded PEFT model, which are listed below. If you're still unable to troubleshoot the problem, see if anyone else had a similar [issue](https://github.com/huggingface/peft/issues) on GitHub, and if you can't find any, open a new issue.

When opening an issue, it helps a lot if you provide a minimal code example that reproduces the issue. Also, please report if the loaded model performs at the same level as the model did before fine-tuning, if it performs at a random level, or if it is only slightly worse than expected. This information helps us identify the problem more quickly.

### Random deviations

If your model outputs are not exactly the same as previous runs, there could be an issue with random elements. For example:

1. please ensure the model is in `.eval()` mode, which is important if, for instance, the model uses dropout
2. if you use [`~transformers.GenerationMixin.generate`] on a language model, there could be random sampling, so obtaining the same result requires setting a random seed
3. if you used quantization and merged the weights, small deviations are expected due to rounding errors

### Incorrectly loaded model

Please ensure that you load the model correctly. A common error is trying to load a _trained_ model with `get_peft_model`, which is incorrect. Instead, the loading code should look like this:

```python
from peft import PeftModel, PeftConfig

base_model = ...  # to load the base model, use the same code as when you trained it
config = PeftConfig.from_pretrained(peft_model_id)
peft_model = PeftModel.from_pretrained(base_model, peft_model_id)
```

### Randomly initialized layers

For some tasks, it is important to correctly configure `modules_to_save` in the config to account for randomly initialized layers.

As an example, this is necessary if you use LoRA to fine-tune a language model for sequence classification because 🤗 Transformers adds a randomly initialized classification head on top of the model.
If you do not add this layer to `modules_to_save`, the classification head won't be saved. The next time you load the model, you'll get a _different_ randomly initialized classification head, resulting in completely different results.

In PEFT, we try to correctly guess the `modules_to_save` if you provide the `task_type` argument in the config. This should work for transformers models that follow the standard naming scheme. It is always a good idea to double-check though, because we can't guarantee all models follow the naming scheme.

When you load a transformers model that has randomly initialized layers, you should see a warning along the lines of:

```
Some weights of <MODEL> were not initialized from the model checkpoint at <ID> and are newly initialized: [<LAYER_NAMES>].
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
```

The mentioned layers should be added to `modules_to_save` in the config to avoid the described problem.
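As a minimal sketch, a sequence classification setup could look like this (the head is often called `classifier` or `score`, but the exact name depends on the model, so treat it as a placeholder):

```python
from peft import LoraConfig

# "classifier" is a placeholder; use the real name of your model's randomly initialized head
config = LoraConfig(task_type="SEQ_CLS", modules_to_save=["classifier"])
```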
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
-->

# Working with custom models

Some fine-tuning techniques, such as prompt tuning, are specific to language models. That means in 🤗 PEFT, it is assumed a 🤗 Transformers model is being used. However, other fine-tuning techniques - like [LoRA](../conceptual_guides/lora) - are not restricted to specific model types.

In this guide, we will see how LoRA can be applied to a multilayer perceptron and a computer vision model from the [timm](https://huggingface.co/docs/timm/index) library.

## Multilayer perceptron

Let's assume that we want to fine-tune a multilayer perceptron with LoRA. Here is the definition:

```python
from torch import nn


class MLP(nn.Module):
    def __init__(self, num_units_hidden=2000):
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(20, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, 2),
            nn.LogSoftmax(dim=-1),
        )

    def forward(self, X):
        return self.seq(X)
```

This is a straightforward multilayer perceptron with an input layer, a hidden layer, and an output layer.

<Tip>

For this toy example, we choose an exceedingly large number of hidden units to highlight the efficiency gains from PEFT, but those gains are in line with more realistic examples.

</Tip>

There are a few linear layers in this model that could be tuned with LoRA. When working with common 🤗 Transformers models, PEFT will know which layers to apply LoRA to, but in this case, it is up to us as a user to choose the layers. To determine the names of the layers to tune:

```python
print([(n, type(m)) for n, m in MLP().named_modules()])
```

This should print:

```
[('', __main__.MLP),
 ('seq', torch.nn.modules.container.Sequential),
 ('seq.0', torch.nn.modules.linear.Linear),
 ('seq.1', torch.nn.modules.activation.ReLU),
 ('seq.2', torch.nn.modules.linear.Linear),
 ('seq.3', torch.nn.modules.activation.ReLU),
 ('seq.4', torch.nn.modules.linear.Linear),
 ('seq.5', torch.nn.modules.activation.LogSoftmax)]
```

Let's say we want to apply LoRA to the input layer and to the hidden layer; those are `'seq.0'` and `'seq.2'`. Moreover, let's assume we want to update the output layer without LoRA; that would be `'seq.4'`. The corresponding config would be:

```python
from peft import LoraConfig

config = LoraConfig(
    target_modules=["seq.0", "seq.2"],
    modules_to_save=["seq.4"],
)
```

With that, we can create our PEFT model and check the fraction of parameters trained:

```python
from peft import get_peft_model

model = MLP()
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
# prints trainable params: 56,164 || all params: 4,100,164 || trainable%: 1.369798866581922
```

Finally, we can use any training framework we like, or write our own fit loop, to train the `peft_model`.
For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb).

## timm models

The [timm](https://huggingface.co/docs/timm/index) library contains a large number of pretrained computer vision models. Those can also be fine-tuned with PEFT. Let's check out how this works in practice.

To start, ensure that timm is installed in the Python environment:

```bash
python -m pip install -U timm
```

Next, we load a timm model for an image classification task:

```python
import timm

num_classes = ...
model_id = "timm/poolformer_m36.sail_in1k"
model = timm.create_model(model_id, pretrained=True, num_classes=num_classes)
```

Again, we need to make a decision about what layers to apply LoRA to. Since LoRA supports 2D conv layers, and since those are a major building block of this model, we should apply LoRA to the 2D conv layers. To identify the names of those layers, let's look at all the layer names:

```python
print([(n, type(m)) for n, m in model.named_modules()])
```

This will print a very long list; we'll only show the first few entries:

```
[('', timm.models.metaformer.MetaFormer),
 ('stem', timm.models.metaformer.Stem),
 ('stem.conv', torch.nn.modules.conv.Conv2d),
 ('stem.norm', torch.nn.modules.linear.Identity),
 ('stages', torch.nn.modules.container.Sequential),
 ('stages.0', timm.models.metaformer.MetaFormerStage),
 ('stages.0.downsample', torch.nn.modules.linear.Identity),
 ('stages.0.blocks', torch.nn.modules.container.Sequential),
 ('stages.0.blocks.0', timm.models.metaformer.MetaFormerBlock),
 ('stages.0.blocks.0.norm1', timm.layers.norm.GroupNorm1),
 ('stages.0.blocks.0.token_mixer', timm.models.metaformer.Pooling),
 ('stages.0.blocks.0.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d),
 ('stages.0.blocks.0.drop_path1', torch.nn.modules.linear.Identity),
 ('stages.0.blocks.0.layer_scale1', timm.models.metaformer.Scale),
 ('stages.0.blocks.0.res_scale1', torch.nn.modules.linear.Identity),
 ('stages.0.blocks.0.norm2', timm.layers.norm.GroupNorm1),
 ('stages.0.blocks.0.mlp', timm.layers.mlp.Mlp),
 ('stages.0.blocks.0.mlp.fc1', torch.nn.modules.conv.Conv2d),
 ('stages.0.blocks.0.mlp.act', torch.nn.modules.activation.GELU),
 ('stages.0.blocks.0.mlp.drop1', torch.nn.modules.dropout.Dropout),
 ('stages.0.blocks.0.mlp.norm', torch.nn.modules.linear.Identity),
 ('stages.0.blocks.0.mlp.fc2', torch.nn.modules.conv.Conv2d),
 ('stages.0.blocks.0.mlp.drop2', torch.nn.modules.dropout.Dropout),
 ('stages.0.blocks.0.drop_path2', torch.nn.modules.linear.Identity),
 ('stages.0.blocks.0.layer_scale2', timm.models.metaformer.Scale),
 ('stages.0.blocks.0.res_scale2', torch.nn.modules.linear.Identity),
 ('stages.0.blocks.1', timm.models.metaformer.MetaFormerBlock),
 ('stages.0.blocks.1.norm1', timm.layers.norm.GroupNorm1),
 ('stages.0.blocks.1.token_mixer', timm.models.metaformer.Pooling),
 ('stages.0.blocks.1.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d),
 ...
 ('head.global_pool.flatten', torch.nn.modules.linear.Identity),
 ('head.norm', timm.layers.norm.LayerNorm2d),
 ('head.flatten', torch.nn.modules.flatten.Flatten),
 ('head.drop', torch.nn.modules.linear.Identity),
 ('head.fc', torch.nn.modules.linear.Linear)]
```

Upon closer inspection, we see that the 2D conv layers have names such as `"stages.0.blocks.0.mlp.fc1"` and `"stages.0.blocks.0.mlp.fc2"`. How can we match those layer names specifically? You can write a [regular expression](https://docs.python.org/3/library/re.html) to match the layer names.
For our case, the regex `r".*\.mlp\.fc\d"` should do the job. Furthermore, as in the first example, we should ensure that the output layer, in this case the classification head, is also updated. Looking at the end of the list printed above, we can see that it's named `'head.fc'`. With that in mind, here is our LoRA config:

```python
config = LoraConfig(target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"])
```

Then we only need to create the PEFT model by passing our base model and the config to `get_peft_model`:

```python
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
# prints trainable params: 1,064,454 || all params: 56,467,974 || trainable%: 1.88505789139876
```

This shows us that we only need to train less than 2% of all parameters, which is a huge efficiency gain.

For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb).

## New transformers architectures

When new popular transformers architectures are released, we do our best to quickly add them to PEFT. If you come across a transformers model that is not supported out of the box, don't worry, it will most likely still work if the config is set correctly. Specifically, you have to identify the layers that should be adapted and set them correctly when initializing the corresponding config class, e.g. `LoraConfig`. Here are some tips to help with this.

As a first step, it is a good idea to check the existing models for inspiration. You can find them inside of [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) in the PEFT repository. Often, you'll find a similar architecture that uses the same names. For example, if the new model architecture is a variation of the "mistral" model and you want to apply LoRA, you can see that the entry for "mistral" in `TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING` contains `["q_proj", "v_proj"]`. This tells you that for "mistral" models, the `target_modules` for LoRA should be `["q_proj", "v_proj"]`:

```python
from peft import LoraConfig, get_peft_model

my_mistral_model = ...
config = LoraConfig(
    target_modules=["q_proj", "v_proj"],
    ...,  # other LoRA arguments
)
peft_model = get_peft_model(my_mistral_model, config)
```

If that doesn't help, check the existing modules in your model architecture with the `named_modules` method and try to identify the attention layers, especially the key, query, and value layers. Those will often have names such as `c_attn`, `query`, `q_proj`, etc. The key layer is not always adapted, and ideally, you should check whether including it results in better performance. Additionally, linear layers are common targets to be adapted (e.g. in the [QLoRA paper](https://arxiv.org/abs/2305.14314), the authors suggest adapting them as well). Their names will often contain the strings `fc` or `dense`. A small sketch for surveying layer names in this way follows at the end of this guide.

If you want to add a new model to PEFT, please create an entry in [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) and open a pull request on the [repository](https://github.com/huggingface/peft/pulls). Don't forget to update the [README](https://github.com/huggingface/peft#models-support-matrix) as well.
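As a rough aid for the layer-name survey mentioned above, here is a minimal sketch that counts how often each name suffix occurs among the linear layers of a model. `my_model` is a placeholder for whatever architecture you are inspecting, and the printed suffixes are illustrative, not guaranteed:

```python
import torch
from collections import Counter

# Count the final name component of every linear layer; frequent suffixes
# such as 'q_proj' or 'v_proj' are natural candidates for `target_modules`.
suffix_counts = Counter(
    name.rsplit(".", 1)[-1]
    for name, module in my_model.named_modules()
    if isinstance(module, torch.nn.Linear)
)
print(suffix_counts.most_common())
# e.g. [('q_proj', 32), ('k_proj', 32), ('v_proj', 32), ('o_proj', 32), ...]
```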
hf_public_repos/peft/docs/source/developer_guides/contributing.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Contributing to PEFT

We are happy to accept contributions to PEFT. If you plan to contribute, please read this document to make the process as smooth as possible.

## Installation

The installation instructions can be found [here](https://huggingface.co/docs/peft/install). If you want to provide code contributions to PEFT, you should choose the "source" installation method.

If you are new to creating a pull request, follow [these instructions from GitHub](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request).

## Running tests and code quality checks

Regardless of the type of contribution (unless it’s only about the docs), you should run tests and code quality checks before creating a PR to ensure that your contribution doesn’t break anything and follows the standards of the project.

We provide a Makefile to facilitate those steps. Run the code below for the unit tests:

```sh
make test
```

Run one of the following to either check or check and fix code quality and style:

```sh
make quality  # just check
make style  # check and fix
```

Running all the tests can take a couple of minutes. Therefore, during development, it can be useful to run only those tests specific to your change:

```sh
pytest tests/ -k <name-of-test>
```

This should finish much more quickly and allow faster iteration. Before creating the PR, however, please still run the whole test suite, as some changes can inadvertently break tests that at first glance are unrelated.

If your change is specific to a hardware setting (e.g. it requires CUDA), take a look at `tests/test_gpu_examples.py` and `tests/test_common_gpu.py` – maybe it makes sense to add a test there. If your change could have an effect on saving and loading models, please run the tests with the `--regression` flag to trigger regression tests.

It can happen that while you’re working on your PR, the underlying code base changes due to other changes being merged. If that happens – especially when there is a merge conflict – please update your branch to be on the latest changes. This can be a merge or a rebase, whatever you prefer. We will squash and merge the PR once it’s ready.

## PR description

When opening the PR, please provide a clear description of the change you are proposing. If it relates to other issues or PRs, please reference them. Providing a good description will not only help the reviewers review your code better and faster, it can also later be used (as a basis) for the commit message, which helps with long-term maintenance of the project.

If your code makes some non-trivial changes, it can also be a good idea to add comments to the code to explain those changes.
For example, if you had to iterate on your implementation multiple times because the most obvious way didn’t work, it’s a good indication that a code comment is needed.

## Providing a bugfix

Please give a description of the circumstances that led to the bug. If there is an existing issue, please link to it (e.g. “Resolves #12345”).

Ideally, when a bugfix is provided, it should be accompanied by a test for this bug. The test should fail with the current code and pass with the bugfix. Add a comment to the test that references the issue or PR. Without such a test, it is difficult to prevent regressions in the future.

## Adding a new fine-tuning method

New parameter-efficient fine-tuning methods are developed all the time. If you would like to add a new, promising method to PEFT, please follow these steps.

**Requirements**

1. Please add a link to the source (usually a paper) of the method.
2. Some evidence should be provided that there is general interest in using the method. We will not add new methods that are freshly published but come without evidence of demand.
3. Ideally, we want to not only add the implementation of the new method, but also examples (notebooks, scripts), documentation, and an extensive test suite that proves that the method works with a variety of tasks. However, this can be very daunting. Therefore, it is also acceptable to only provide the implementation and at least one working example. Documentation and tests can be added in follow-up PRs.

**Steps**

Before you start to implement the new method, please open an issue on GitHub with your proposal. That way, the maintainers can give you some early feedback.

When implementing the method, it makes sense to use existing implementations as a guide. Moreover, when you structure your code, please take inspiration from the other PEFT methods. For example, if your method is similar to LoRA, it makes sense to structure your code similarly or even re-use some functions or classes where it makes sense (but don’t overdo it, some code duplication is okay).

Once you have something that seems to be working, don’t hesitate to create a draft PR, even if it’s not in a mergeable state yet. The maintainers will be happy to give you feedback and guidance along the way.

## Adding other features

It is best if you first open an issue on GitHub with a proposal to add the new feature. That way, you can discuss with the maintainers if it makes sense to add the feature before spending too much time on implementing it.

New features should generally be accompanied by tests and documentation or examples. Without the latter, users will have a hard time discovering your cool new feature.

Changes to the code should be implemented in a backward-compatible way. For example, existing code should continue to work the same way after the feature is merged.
hf_public_repos/peft/docs/source/developer_guides/low_level_api.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# PEFT as a utility library

This section covers how you can leverage PEFT's low-level API to inject trainable adapters into any `torch` module. The development of this API was motivated by power users who want to use adapter methods such as LoRA, IA3, and AdaLoRA without relying on the modeling classes exposed by the PEFT library.

## Supported tuner types

Currently, the supported adapter types are the 'injectable' adapters, meaning adapters where an in-place modification of the model is sufficient to correctly perform the fine-tuning. As such, only [LoRA](../conceptual_guides/lora), AdaLoRA and [IA3](../conceptual_guides/ia3) are currently supported in this API.

## `inject_adapter_in_model` method

To perform the adapter injection, use the `inject_adapter_in_model` method, which takes three arguments: the PEFT config, the model itself, and an optional adapter name. You can also attach multiple adapters to the model by calling `inject_adapter_in_model` multiple times with different adapter names.

Below is a basic example usage of how to inject LoRA adapters into the submodule `linear` of the module `DummyModel`.

```python
import torch
from peft import inject_adapter_in_model, LoraConfig


class DummyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.embedding = torch.nn.Embedding(10, 10)
        self.linear = torch.nn.Linear(10, 10)
        self.lm_head = torch.nn.Linear(10, 10)

    def forward(self, input_ids):
        x = self.embedding(input_ids)
        x = self.linear(x)
        x = self.lm_head(x)
        return x


lora_config = LoraConfig(
    lora_alpha=16,
    lora_dropout=0.1,
    r=64,
    bias="none",
    target_modules=["linear"],
)

model = DummyModel()
model = inject_adapter_in_model(lora_config, model)

dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])
dummy_outputs = model(dummy_inputs)
```

If you print the model, you will notice that the adapters have been correctly injected:

```
DummyModel(
  (embedding): Embedding(10, 10)
  (linear): Linear(
    in_features=10, out_features=10, bias=True
    (lora_dropout): ModuleDict(
      (default): Dropout(p=0.1, inplace=False)
    )
    (lora_A): ModuleDict(
      (default): Linear(in_features=10, out_features=64, bias=False)
    )
    (lora_B): ModuleDict(
      (default): Linear(in_features=64, out_features=10, bias=False)
    )
    (lora_embedding_A): ParameterDict()
    (lora_embedding_B): ParameterDict()
  )
  (lm_head): Linear(in_features=10, out_features=10, bias=True)
)
```

Note that it is up to users to properly take care of saving the adapters (in case they want to save adapters only), as `model.state_dict()` will return the full state dict of the model.
In case you want to extract the adapter state dict, you can use the `get_peft_model_state_dict` method:

```python
from peft import get_peft_model_state_dict

peft_state_dict = get_peft_model_state_dict(model)
print(peft_state_dict)
```

## Pros and cons

When should you use this API, and when not? Let's discuss the pros and cons.

Pros:
- The model gets modified in-place, meaning the model will preserve all its original attributes and methods.
- Works for any torch module, and any modality (vision, text, multi-modal).

Cons:
- You need to manually write the Hugging Face `from_pretrained` and `save_pretrained` utility methods if you want to easily save / load adapters from the Hugging Face Hub (a minimal sketch of such manual handling is shown below).
- You cannot use any of the utility methods provided by `PeftModel`, such as disabling adapters, merging adapters, etc.
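As an illustration of the first point, here is a minimal sketch of manual adapter saving and loading, reusing the `DummyModel` and `lora_config` from the example above. The file name is an arbitrary choice, and the exact key handling of `set_peft_model_state_dict` can vary between PEFT versions, so treat this as a sketch rather than a definitive recipe:

```python
import torch
from peft import get_peft_model_state_dict, set_peft_model_state_dict

# Save only the adapter weights, not the full model.
torch.save(get_peft_model_state_dict(model), "adapter_weights.pt")

# Later: re-create the base model, inject a fresh adapter, and restore the weights.
new_model = DummyModel()
new_model = inject_adapter_in_model(lora_config, new_model)
set_peft_model_state_dict(new_model, torch.load("adapter_weights.pt"))
```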
hf_public_repos/peft/docs/source/task_guides/token-classification-lora.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# LoRA for token classification

Low-Rank Adaptation (LoRA) is a reparametrization method that aims to reduce the number of trainable parameters with low-rank representations. The weight matrix is broken down into low-rank matrices that are trained and updated. All the pretrained model parameters remain frozen. After training, the low-rank matrices are added back to the original weights. This makes it more efficient to store and train a LoRA model because there are significantly fewer parameters.

<Tip>

💡 Read [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) to learn more about LoRA.

</Tip>

This guide will show you how to train a [`roberta-large`](https://huggingface.co/roberta-large) model with LoRA on the [BioNLP2004](https://huggingface.co/datasets/tner/bionlp2004) dataset for token classification.

Before you begin, make sure you have all the necessary libraries installed:

```bash
!pip install -q peft transformers datasets evaluate seqeval
```

## Setup

Let's start by importing all the necessary libraries you'll need:

- 🤗 Transformers for loading the base `roberta-large` model and tokenizer, and handling the training loop
- 🤗 Datasets for loading and preparing the `bionlp2004` dataset for training
- 🤗 Evaluate for evaluating the model's performance
- 🤗 PEFT for setting up the LoRA configuration and creating the PEFT model

```py
from datasets import load_dataset
from transformers import (
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorForTokenClassification,
    TrainingArguments,
    Trainer,
)
from peft import get_peft_config, PeftModel, PeftConfig, get_peft_model, LoraConfig, TaskType
import evaluate
import torch
import numpy as np

model_checkpoint = "roberta-large"
lr = 1e-3
batch_size = 16
num_epochs = 10
```

## Load dataset and metric

The [BioNLP2004](https://huggingface.co/datasets/tner/bionlp2004) dataset includes tokens and tags for biological structures like DNA, RNA and proteins. Load the dataset:

```py
bionlp = load_dataset("tner/bionlp2004")
bionlp["train"][0]
{
    "tokens": [
        "Since",
        "HUVECs",
        "released",
        "superoxide",
        "anions",
        "in",
        "response",
        "to",
        "TNF",
        ",",
        "and",
        "H2O2",
        "induces",
        "VCAM-1",
        ",",
        "PDTC",
        "may",
        "act",
        "as",
        "a",
        "radical",
        "scavenger",
        ".",
    ],
    "tags": [0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0],
}
```

The `tags` values are defined in the label ids [dictionary](https://huggingface.co/datasets/tner/bionlp2004#label-id). The letter that prefixes each label indicates the token position: `B` is for the first token of an entity, `I` is for a token inside the entity, and `O` is for a token that is not part of an entity.

```py
{
    "O": 0,
    "B-DNA": 1,
    "I-DNA": 2,
    "B-protein": 3,
    "I-protein": 4,
    "B-cell_type": 5,
    "I-cell_type": 6,
    "B-cell_line": 7,
    "I-cell_line": 8,
    "B-RNA": 9,
    "I-RNA": 10,
}
```

Then load the [`seqeval`](https://huggingface.co/spaces/evaluate-metric/seqeval) framework, which includes several metrics - precision, accuracy, F1, and recall - for evaluating sequence labeling tasks.
```py
seqeval = evaluate.load("seqeval")
```

Now you can write an evaluation function to compute the metrics from the model predictions and labels, and return the precision, recall, F1, and accuracy scores:

```py
label_list = [
    "O",
    "B-DNA",
    "I-DNA",
    "B-protein",
    "I-protein",
    "B-cell_type",
    "I-cell_type",
    "B-cell_line",
    "I-cell_line",
    "B-RNA",
    "I-RNA",
]


def compute_metrics(p):
    predictions, labels = p
    predictions = np.argmax(predictions, axis=2)

    true_predictions = [
        [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]
    true_labels = [
        [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
        for prediction, label in zip(predictions, labels)
    ]

    results = seqeval.compute(predictions=true_predictions, references=true_labels)
    return {
        "precision": results["overall_precision"],
        "recall": results["overall_recall"],
        "f1": results["overall_f1"],
        "accuracy": results["overall_accuracy"],
    }
```

## Preprocess dataset

Initialize a tokenizer and make sure you set `is_split_into_words=True` because the text sequence has already been split into words. However, this doesn't mean it is tokenized yet (even though it may look like it!), and you'll need to further tokenize the words into subwords.

```py
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, add_prefix_space=True)
```

You'll also need to write a function to:

1. Map each token to its respective word with the [`~transformers.BatchEncoding.word_ids`] method.
2. Ignore the special tokens by setting them to `-100`.
3. Label the first token of a given entity.

```py
def tokenize_and_align_labels(examples):
    tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)

    labels = []
    for i, label in enumerate(examples["tags"]):
        word_ids = tokenized_inputs.word_ids(batch_index=i)
        previous_word_idx = None
        label_ids = []
        for word_idx in word_ids:
            if word_idx is None:
                label_ids.append(-100)
            elif word_idx != previous_word_idx:
                label_ids.append(label[word_idx])
            else:
                label_ids.append(-100)
            previous_word_idx = word_idx
        labels.append(label_ids)

    tokenized_inputs["labels"] = labels
    return tokenized_inputs
```

Use [`~datasets.Dataset.map`] to apply the `tokenize_and_align_labels` function to the dataset:

```py
tokenized_bionlp = bionlp.map(tokenize_and_align_labels, batched=True)
```

Finally, create a data collator to pad the examples to the longest length in a batch:

```py
data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
```

## Train

Now you're ready to create a [`PeftModel`].
Start by loading the base `roberta-large` model, specifying the number of expected labels, and the `id2label` and `label2id` dictionaries:

```py
id2label = {
    0: "O",
    1: "B-DNA",
    2: "I-DNA",
    3: "B-protein",
    4: "I-protein",
    5: "B-cell_type",
    6: "I-cell_type",
    7: "B-cell_line",
    8: "I-cell_line",
    9: "B-RNA",
    10: "I-RNA",
}
label2id = {
    "O": 0,
    "B-DNA": 1,
    "I-DNA": 2,
    "B-protein": 3,
    "I-protein": 4,
    "B-cell_type": 5,
    "I-cell_type": 6,
    "B-cell_line": 7,
    "I-cell_line": 8,
    "B-RNA": 9,
    "I-RNA": 10,
}

model = AutoModelForTokenClassification.from_pretrained(
    model_checkpoint, num_labels=11, id2label=id2label, label2id=label2id
)
```

Define the [`LoraConfig`] with:

- `task_type`, token classification (`TaskType.TOKEN_CLS`)
- `r`, the dimension of the low-rank matrices
- `lora_alpha`, scaling factor for the weight matrices
- `lora_dropout`, dropout probability of the LoRA layers
- `bias`, set to `all` to train all bias parameters

<Tip>

💡 The weight matrix is scaled by `lora_alpha/r`, and a higher `lora_alpha` value assigns more weight to the LoRA activations. For performance, we recommend setting `bias` to `"none"` first, and then `"lora_only"`, before trying `"all"`.

</Tip>

```py
peft_config = LoraConfig(
    task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
)
```

Pass the base model and `peft_config` to the [`get_peft_model`] function to create a [`PeftModel`]. You can check out how much more efficient training the [`PeftModel`] is compared to fully training the base model by printing out the trainable parameters:

```py
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
"trainable params: 1855499 || all params: 355894283 || trainable%: 0.5213624069370061"
```

From the 🤗 Transformers library, create a [`~transformers.TrainingArguments`] class and specify where you want to save the model to, the training hyperparameters, how to evaluate the model, and when to save the checkpoints:

```py
training_args = TrainingArguments(
    output_dir="roberta-large-lora-token-classification",
    learning_rate=lr,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=num_epochs,
    weight_decay=0.01,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
)
```

Pass the model, `TrainingArguments`, datasets, tokenizer, data collator and evaluation function to the [`~transformers.Trainer`] class. The `Trainer` handles the training loop for you, and when you're ready, call [`~transformers.Trainer.train`] to begin!

```py
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_bionlp["train"],
    eval_dataset=tokenized_bionlp["validation"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

trainer.train()
```

## Share model

Once training is complete, you can store and share your model on the Hub if you'd like.
Log in to your Hugging Face account and enter your token when prompted:

```py
from huggingface_hub import notebook_login

notebook_login()
```

Upload the model to a specific model repository on the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method:

```py
model.push_to_hub("your-name/roberta-large-lora-token-classification")
```

## Inference

To use your model for inference, load the configuration and model:

```py
peft_model_id = "stevhliu/roberta-large-lora-token-classification"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForTokenClassification.from_pretrained(
    config.base_model_name_or_path, num_labels=11, id2label=id2label, label2id=label2id
)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(inference_model, peft_model_id)
```

Get some text to tokenize:

```py
text = "The activation of IL-2 gene expression and NF-kappa B through CD28 requires reactive oxygen production by 5-lipoxygenase."
inputs = tokenizer(text, return_tensors="pt")
```

Pass the inputs to the model, and print out the model prediction for each token:

```py
with torch.no_grad():
    logits = model(**inputs).logits

tokens = inputs.tokens()
predictions = torch.argmax(logits, dim=2)

for token, prediction in zip(tokens, predictions[0].numpy()):
    print((token, model.config.id2label[prediction]))
("<s>", "O")
("The", "O")
("Ġactivation", "O")
("Ġof", "O")
("ĠIL", "B-DNA")
("-", "O")
("2", "I-DNA")
("Ġgene", "O")
("Ġexpression", "O")
("Ġand", "O")
("ĠNF", "B-protein")
("-", "O")
("k", "I-protein")
("appa", "I-protein")
("ĠB", "I-protein")
("Ġthrough", "O")
("ĠCD", "B-protein")
("28", "I-protein")
("Ġrequires", "O")
("Ġreactive", "O")
("Ġoxygen", "O")
("Ġproduction", "O")
("Ġby", "O")
("Ġ5", "B-protein")
("-", "O")
("lip", "I-protein")
("oxy", "I-protein")
("gen", "I-protein")
("ase", "I-protein")
(".", "O")
("</s>", "O")
```
hf_public_repos/peft/docs/source/task_guides/int8-asr.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# int8 training for automatic speech recognition

Quantization reduces the precision of floating point data types, decreasing the memory required to store model weights. However, quantization degrades inference performance because you lose information when you reduce the precision. 8-bit or `int8` quantization uses only a quarter of the bits of full (32-bit) precision, but it does not degrade performance because it doesn't simply drop the bits or data. Instead, `int8` quantization *rounds* from one data type to another.

<Tip>

💡 Read the [LLM.int8(): 8-bit Matrix Multiplication for Transformers at Scale](https://arxiv.org/abs/2208.07339) paper to learn more, or you can take a look at the corresponding [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration) for a gentler introduction.

</Tip>

This guide will show you how to train an [`openai/whisper-large-v2`](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition (ASR) using a combination of `int8` quantization and LoRA. You'll train Whisper for multilingual ASR on Marathi from the [Common Voice 11.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) dataset.

Before you start, make sure you have all the necessary libraries installed:

```bash
!pip install -q peft transformers datasets accelerate evaluate jiwer bitsandbytes
```

## Setup

Let's take care of some of the setup first so you can start training faster later. Set the `CUDA_VISIBLE_DEVICES` to `0` to use the first GPU on your machine. Then you can specify the model name (either a Hub model repository id or a path to a directory containing the model), language and language abbreviation to train on, the task type, and the dataset name:

```py
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "0"
model_name_or_path = "openai/whisper-large-v2"
language = "Marathi"
language_abbr = "mr"
task = "transcribe"
dataset_name = "mozilla-foundation/common_voice_11_0"
```

You can also log in to your Hugging Face account to save and share your trained model on the Hub if you'd like:

```py
from huggingface_hub import notebook_login

notebook_login()
```

## Load dataset and metric

The [Common Voice 11.0](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0) dataset contains many hours of recorded speech in many different languages. This guide uses the [Marathi](https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/mr/train) language as an example, but feel free to use any other language you're interested in.

Initialize a [`~datasets.DatasetDict`] structure, and load the `train` (combining the `train+validation` splits into `train`) and `test` splits from the dataset into it:

```py
from datasets import load_dataset, DatasetDict

common_voice = DatasetDict()

common_voice["train"] = load_dataset(dataset_name, language_abbr, split="train+validation", use_auth_token=True)
common_voice["test"] = load_dataset(dataset_name, language_abbr, split="test", use_auth_token=True)
common_voice["train"][0]
```

## Preprocess dataset

Let's prepare the dataset for training. Load a feature extractor, tokenizer, and processor.
You should also pass the language and task to the tokenizer and processor so they know how to process the inputs:

```py
from transformers import AutoFeatureExtractor, AutoTokenizer, AutoProcessor

feature_extractor = AutoFeatureExtractor.from_pretrained(model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, language=language, task=task)
processor = AutoProcessor.from_pretrained(model_name_or_path, language=language, task=task)
```

You'll only be training on the `sentence` and `audio` columns, so you can remove the rest of the metadata with [`~datasets.Dataset.remove_columns`]:

```py
common_voice = common_voice.remove_columns(
    ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"]
)
common_voice["train"][0]
{
    "audio": {
        "path": "/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3",
        "array": array(
            [1.13686838e-13, -1.42108547e-13, -1.98951966e-13, ..., 4.83472422e-06, 3.54798703e-06, 1.63231743e-06]
        ),
        "sampling_rate": 48000,
    },
    "sentence": "आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.",
}
```

If you look at the `sampling_rate`, you'll see the audio was sampled at 48kHz. The Whisper model was pretrained on audio inputs at 16kHz, which means you'll need to downsample the audio inputs to match what the model was pretrained on. Downsample the audio by using the [`~datasets.Dataset.cast_column`] method on the `audio` column, and set the `sampling_rate` to 16kHz. The audio input is resampled on the fly the next time you call it:

```py
from datasets import Audio

common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000))
common_voice["train"][0]
{
    "audio": {
        "path": "/root/.cache/huggingface/datasets/downloads/extracted/f7e1ef6a2d14f20194999aad5040c5d4bb3ead1377de3e1bbc6e9dba34d18a8a/common_voice_mr_30585613.mp3",
        "array": array(
            [-3.06954462e-12, -3.63797881e-12, -4.54747351e-12, ..., -7.74800901e-06, -1.74738125e-06, 4.36312439e-06]
        ),
        "sampling_rate": 16000,
    },
    "sentence": "आईचे आजारपण वाढत चालले, तसतशी मथीही नीट खातपीतनाशी झाली.",
}
```

Once you've cleaned up the dataset, you can write a function to generate the correct model inputs. The function should:

1. Resample the audio inputs to 16kHz by loading the `audio` column.
2. Compute the input features from the audio `array` using the feature extractor.
3. Tokenize the `sentence` column to the input labels.

```py
def prepare_dataset(batch):
    audio = batch["audio"]
    batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0]
    batch["labels"] = tokenizer(batch["sentence"]).input_ids
    return batch
```

Apply the `prepare_dataset` function to the dataset with the [`~datasets.Dataset.map`] function, and set the `num_proc` argument to `2` to enable multiprocessing (if `map` hangs, then set `num_proc=1`):

```py
common_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2)
```

Finally, create a `DataCollator` class to pad the labels in each batch to the maximum length, and replace padding with `-100` so they're ignored by the loss function.
Then initialize an instance of the data collator:

```py
import torch

from dataclasses import dataclass
from typing import Any, Dict, List, Union


@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    processor: Any

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        label_features = [{"input_ids": feature["labels"]} for feature in features]
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels

        return batch


data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)
```

## Train

Now that the dataset is ready, you can turn your attention to the model. Start by loading the pretrained [`openai/whisper-large-v2`](https://huggingface.co/openai/whisper-large-v2) model from [`~transformers.AutoModelForSpeechSeq2Seq`], and make sure to set the [`~transformers.BitsAndBytesConfig.load_in_8bit`] argument to `True` to enable `int8` quantization. The `device_map="auto"` argument automatically determines how to load and store the model weights:

```py
from transformers import AutoModelForSpeechSeq2Seq

model = AutoModelForSpeechSeq2Seq.from_pretrained(model_name_or_path, load_in_8bit=True, device_map="auto")
```

You should configure `forced_decoder_ids=None` because no tokens are used before sampling, and you won't need to suppress any tokens during generation either:

```py
model.config.forced_decoder_ids = None
model.config.suppress_tokens = []
```

To get the model ready for `int8` quantization, use the utility function [`prepare_model_for_int8_training`](https://github.com/huggingface/peft/blob/34027fe813756897767b9a6f19ae7f1c4c7b418c/src/peft/utils/other.py#L35) to handle the following:

- casts all the non `int8` modules to full precision (`fp32`) for stability
- adds a forward hook to the input embedding layer to calculate the gradients of the input hidden states
- enables gradient checkpointing for more memory-efficient training

```py
from peft import prepare_model_for_int8_training

model = prepare_model_for_int8_training(model)
```

Let's also apply LoRA to the training to make it even more efficient. Load a [`~peft.LoraConfig`] and configure the following parameters:

- `r`, the dimension of the low-rank matrices
- `lora_alpha`, scaling factor for the weight matrices
- `target_modules`, the name of the attention matrices to apply LoRA to (`q_proj` and `v_proj`, or query and value in this case)
- `lora_dropout`, dropout probability of the LoRA layers
- `bias`, set to `none`

<Tip>

💡 The weight matrix is scaled by `lora_alpha/r`, and a higher `lora_alpha` value assigns more weight to the LoRA activations. For performance, we recommend setting `bias` to `"none"` first, and then `"lora_only"`, before trying `"all"`.

</Tip>

```py
from peft import LoraConfig, get_peft_model

config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none")
```

After you set up the [`~peft.LoraConfig`], wrap it and the base model with the [`get_peft_model`] function to create a [`PeftModel`]. Print out the number of trainable parameters to see how much more efficient LoRA is compared to fully training the model!
```py
model = get_peft_model(model, config)
model.print_trainable_parameters()
"trainable params: 15728640 || all params: 1559033600 || trainable%: 1.0088711365810203"
```

Now you're ready to define some training hyperparameters in the [`~transformers.Seq2SeqTrainingArguments`] class, such as where to save the model to, batch size, learning rate, and number of epochs to train for. The [`PeftModel`] doesn't have the same signature as the base model, so you'll need to explicitly set `remove_unused_columns=False` and `label_names=["labels"]`.

```py
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="your-name/int8-whisper-large-v2-asr",
    per_device_train_batch_size=8,
    gradient_accumulation_steps=1,
    learning_rate=1e-3,
    warmup_steps=50,
    num_train_epochs=3,
    evaluation_strategy="epoch",
    fp16=True,
    per_device_eval_batch_size=8,
    generation_max_length=128,
    logging_steps=25,
    remove_unused_columns=False,
    label_names=["labels"],
)
```

It is also a good idea to write a custom [`~transformers.TrainerCallback`] to save model checkpoints during training:

```py
import os

from transformers import TrainerCallback, TrainingArguments, TrainerState, TrainerControl
from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR


class SavePeftModelCallback(TrainerCallback):
    def on_save(
        self,
        args: TrainingArguments,
        state: TrainerState,
        control: TrainerControl,
        **kwargs,
    ):
        checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}")

        peft_model_path = os.path.join(checkpoint_folder, "adapter_model")
        kwargs["model"].save_pretrained(peft_model_path)

        pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin")
        if os.path.exists(pytorch_model_path):
            os.remove(pytorch_model_path)
        return control
```

Pass the `Seq2SeqTrainingArguments`, model, datasets, data collator, tokenizer, and callback to the [`~transformers.Seq2SeqTrainer`]. You can optionally set `model.config.use_cache = False` to silence any warnings. Once everything is ready, call [`~transformers.Trainer.train`] to start training!

```py
from transformers import Seq2SeqTrainer

trainer = Seq2SeqTrainer(
    args=training_args,
    model=model,
    train_dataset=common_voice["train"],
    eval_dataset=common_voice["test"],
    data_collator=data_collator,
    tokenizer=processor.feature_extractor,
    callbacks=[SavePeftModelCallback],
)
model.config.use_cache = False
trainer.train()
```

## Evaluate

[Word error rate](https://huggingface.co/spaces/evaluate-metric/wer) (WER) is a common metric for evaluating ASR models. Load the WER metric from 🤗 Evaluate:

```py
import evaluate

metric = evaluate.load("wer")
```

Write a loop to evaluate the model performance. Set the model to evaluation mode first, and write the loop with [`torch.cuda.amp.autocast()`](https://pytorch.org/docs/stable/amp.html) because `int8` training requires autocasting. Then, pass a batch of examples to the model to evaluate.
Get the decoded predictions and labels, and add them as a batch to the WER metric before calling `compute` to get the final WER score:

```py
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
import gc

eval_dataloader = DataLoader(common_voice["test"], batch_size=8, collate_fn=data_collator)
model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    with torch.cuda.amp.autocast():
        with torch.no_grad():
            generated_tokens = (
                model.generate(
                    input_features=batch["input_features"].to("cuda"),
                    decoder_input_ids=batch["labels"][:, :4].to("cuda"),
                    max_new_tokens=255,
                )
                .cpu()
                .numpy()
            )
            labels = batch["labels"].cpu().numpy()
            labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
            decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
            decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
            metric.add_batch(
                predictions=decoded_preds,
                references=decoded_labels,
            )
    del generated_tokens, labels, batch
    gc.collect()
wer = 100 * metric.compute()
print(f"{wer=}")
```

## Share model

Once you're happy with your results, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method:

```py
model.push_to_hub("your-name/int8-whisper-large-v2-asr")
```

## Inference

Let's test the model out now!

Instantiate the model configuration from [`PeftConfig`], and from here, you can use the configuration to load the base and [`PeftModel`], tokenizer, processor, and feature extractor. Remember to define the `language` and `task` in the tokenizer, processor, and `forced_decoder_ids`:

```py
from peft import PeftModel, PeftConfig
from transformers import WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor

peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab"
language = "Marathi"
task = "transcribe"
peft_config = PeftConfig.from_pretrained(peft_model_id)
model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto"
)
model = PeftModel.from_pretrained(model, peft_model_id)
tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
feature_extractor = processor.feature_extractor
forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task)
```

Load an audio sample (you can listen to it in the [Dataset Preview](https://huggingface.co/datasets/stevhliu/dummy)) to transcribe, and the [`~transformers.AutomaticSpeechRecognitionPipeline`]:

```py
from transformers import AutomaticSpeechRecognitionPipeline

audio = "https://huggingface.co/datasets/stevhliu/dummy/resolve/main/mrt_01523_00028548203.wav"
pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor)
```

Then use the pipeline with autocast as a context manager on the audio sample:

```py
with torch.cuda.amp.autocast():
    text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"]
text
"मी तुमच्यासाठी काही करू शकतो का?"
```
hf_public_repos/peft/docs/source/task_guides/dreambooth_lora.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# DreamBooth fine-tuning with LoRA

This guide demonstrates how to use LoRA, a low-rank approximation technique, to fine-tune DreamBooth with the `CompVis/stable-diffusion-v1-4` model.

Although LoRA was initially designed as a technique for reducing the number of trainable parameters in large-language models, the technique can also be applied to diffusion models. Performing a complete model fine-tuning of diffusion models is a time-consuming task, which is why lightweight techniques like DreamBooth or Textual Inversion gained popularity. With the introduction of LoRA, customizing and fine-tuning a model on a specific dataset has become even faster.

In this guide we'll be using a DreamBooth fine-tuning script that is available in [PEFT's GitHub repo](https://github.com/huggingface/peft/tree/main/examples/lora_dreambooth). Feel free to explore it and learn how things work.

## Set up your environment

Start by cloning the PEFT repository:

```bash
git clone https://github.com/huggingface/peft
```

Navigate to the directory containing the training scripts for fine-tuning DreamBooth with LoRA:

```bash
cd peft/examples/lora_dreambooth
```

Set up your environment: install PEFT, and all the required libraries. At the time of writing this guide we recommend installing PEFT from source.

```bash
pip install -r requirements.txt
pip install git+https://github.com/huggingface/peft
```

## Fine-tuning DreamBooth

Prepare the images that you will use for fine-tuning the model. Set up a few environment variables:

```bash
export MODEL_NAME="CompVis/stable-diffusion-v1-4"
export INSTANCE_DIR="path-to-instance-images"
export CLASS_DIR="path-to-class-images"
export OUTPUT_DIR="path-to-save-model"
```

Here:

- `INSTANCE_DIR`: The directory containing the images that you intend to use for training your model.
- `CLASS_DIR`: The directory containing class-specific images. In this example, we use prior preservation to avoid overfitting and language-drift. For prior preservation, you need other images of the same class as part of the training process. However, these images can be generated and the training script will save them to a local path you specify here.
- `OUTPUT_DIR`: The destination folder for storing the trained model's weights.

To learn more about DreamBooth fine-tuning with prior-preserving loss, check out the [Diffusers documentation](https://huggingface.co/docs/diffusers/training/dreambooth#finetuning-with-priorpreserving-loss).

Launch the training script with `accelerate` and pass hyperparameters, as well as LoRA-specific arguments, such as:

- `use_lora`: Enables LoRA in the training script.
- `lora_r`: The dimension used by the LoRA update matrices.
- `lora_alpha`: Scaling factor.
- `lora_text_encoder_r`: LoRA rank for text encoder.
- `lora_text_encoder_alpha`: LoRA alpha (scaling factor) for text encoder.

Here's what the full set of script arguments may look like:

```bash
accelerate launch train_dreambooth.py \
  --pretrained_model_name_or_path=$MODEL_NAME \
  --instance_data_dir=$INSTANCE_DIR \
  --class_data_dir=$CLASS_DIR \
  --output_dir=$OUTPUT_DIR \
  --train_text_encoder \
  --with_prior_preservation --prior_loss_weight=1.0 \
  --num_dataloader_workers=1 \
  --instance_prompt="a photo of sks dog" \
  --class_prompt="a photo of dog" \
  --resolution=512 \
  --train_batch_size=1 \
  --lr_scheduler="constant" \
  --lr_warmup_steps=0 \
  --num_class_images=200 \
  --use_lora \
  --lora_r 16 \
  --lora_alpha 27 \
  --lora_text_encoder_r 16 \
  --lora_text_encoder_alpha 17 \
  --learning_rate=1e-4 \
  --gradient_accumulation_steps=1 \
  --gradient_checkpointing \
  --max_train_steps=800
```

If you are running this script on Windows, you may need to set the `--num_dataloader_workers` to 0.

## Inference with a single adapter

To run inference with the fine-tuned model, first specify the base model with which the fine-tuned LoRA weights will be combined:

```python
import os
from pathlib import Path

import torch
from diffusers import StableDiffusionPipeline
from peft import PeftModel, LoraConfig

MODEL_NAME = "CompVis/stable-diffusion-v1-4"
```

Next, add a function that will create a Stable Diffusion pipeline for image generation. It will combine the weights of the base model with the fine-tuned LoRA weights using `LoraConfig`.

```python
def get_lora_sd_pipeline(
    ckpt_dir, base_model_name_or_path=None, dtype=torch.float16, device="cuda", adapter_name="default"
):
    unet_sub_dir = os.path.join(ckpt_dir, "unet")
    text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
    if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None:
        config = LoraConfig.from_pretrained(text_encoder_sub_dir)
        base_model_name_or_path = config.base_model_name_or_path

    if base_model_name_or_path is None:
        raise ValueError("Please specify the base model name or path")

    pipe = StableDiffusionPipeline.from_pretrained(base_model_name_or_path, torch_dtype=dtype).to(device)
    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)

    if os.path.exists(text_encoder_sub_dir):
        pipe.text_encoder = PeftModel.from_pretrained(
            pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
        )

    if dtype in (torch.float16, torch.bfloat16):
        pipe.unet.half()
        pipe.text_encoder.half()

    pipe.to(device)
    return pipe
```

Now you can use the function above to create a Stable Diffusion pipeline using the LoRA weights that you have created during the fine-tuning step. Note that if you're running inference on the same machine, the path you specify here will be the same as `OUTPUT_DIR`.

```python
pipe = get_lora_sd_pipeline(Path("path-to-saved-model"), adapter_name="dog")
```

Once you have the pipeline with your fine-tuned model, you can use it to generate images:

```python
prompt = "sks dog playing fetch in the park"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image.save("DESTINATION_PATH_FOR_THE_IMAGE")
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_dreambooth_dog_park.png" alt="Generated image of a dog in a park"/>
</div>

## Multi-adapter inference

With PEFT you can combine multiple adapters for inference. In the previous example you have fine-tuned Stable Diffusion on some dog images.
The pipeline created from these weights was given the adapter name `"dog"` (`adapter_name="dog"`). Now, suppose you also fine-tuned this base model on images of a crochet toy. Let's see how we can use both adapters.

First, you'll need to perform all the steps as in the single adapter inference example:

1. Specify the base model.
2. Add a function that creates a Stable Diffusion pipeline for image generation using LoRA weights.
3. Create a `pipe` with `adapter_name="dog"` based on the model fine-tuned on dog images.

Next, you're going to need a few more helper functions. To load another adapter, create a `load_adapter()` function that leverages the `load_adapter()` method of `PeftModel` (e.g. `pipe.unet.load_adapter(peft_model_path, adapter_name)`):

```python
def load_adapter(pipe, ckpt_dir, adapter_name):
    unet_sub_dir = os.path.join(ckpt_dir, "unet")
    text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
    pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name)
    if os.path.exists(text_encoder_sub_dir):
        pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name)
```

To switch between adapters, write a function that uses the `set_adapter()` method of `PeftModel` (see `pipe.unet.set_adapter(adapter_name)`):

```python
def set_adapter(pipe, adapter_name):
    pipe.unet.set_adapter(adapter_name)
    if isinstance(pipe.text_encoder, PeftModel):
        pipe.text_encoder.set_adapter(adapter_name)
```

Finally, add a function to create a weighted LoRA adapter:

```python
def create_weighted_lora_adapter(pipe, adapters, weights, adapter_name="default"):
    pipe.unet.add_weighted_adapter(adapters, weights, adapter_name)
    if isinstance(pipe.text_encoder, PeftModel):
        pipe.text_encoder.add_weighted_adapter(adapters, weights, adapter_name)
    return pipe
```

Let's load the second adapter from the model fine-tuned on images of a crochet toy, and give it a unique name:

```python
load_adapter(pipe, Path("path-to-the-second-saved-model"), adapter_name="crochet")
```

Create a pipeline using weighted adapters:

```python
pipe = create_weighted_lora_adapter(pipe, ["crochet", "dog"], [1.0, 1.05], adapter_name="crochet_dog")
```

Now you can switch between adapters.
If you'd like to generate more dog images, set the adapter to `"dog"`:

```python
set_adapter(pipe, adapter_name="dog")
prompt = "sks dog in a supermarket isle"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_dreambooth_dog_supermarket.png" alt="Generated image of a dog in a supermarket"/>
</div>

In the same way, you can switch to the second adapter:

```python
set_adapter(pipe, adapter_name="crochet")
prompt = "a fish rendered in the style of <1>"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_dreambooth_fish.png" alt="Generated image of a crochet fish"/>
</div>

Finally, you can use combined weighted adapters:

```python
set_adapter(pipe, adapter_name="crochet_dog")
prompt = "sks dog rendered in the style of <1>, close up portrait, 4K HD"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_dreambooth_crochet_dog.png" alt="Generated image of a crochet dog"/>
</div>
hf_public_repos/peft/docs/source/task_guides/clm-prompt-tuning.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Prompt tuning for causal language modeling

[[open-in-colab]]

Prompting helps guide language model behavior by adding some input text specific to a task. Prompt tuning is an additive method for only training and updating the newly added prompt tokens to a pretrained model. This way, you can use one pretrained model whose weights are frozen, and train and update a smaller set of prompt parameters for each downstream task instead of fully finetuning a separate model. As models grow larger and larger, prompt tuning can be more efficient, and results are even better as model parameters scale.

<Tip>

💡 Read [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) to learn more about prompt tuning.

</Tip>

This guide will show you how to apply prompt tuning to train a [`bloomz-560m`](https://huggingface.co/bigscience/bloomz-560m) model on the `twitter_complaints` subset of the [RAFT](https://huggingface.co/datasets/ought/raft) dataset.

Before you begin, make sure you have all the necessary libraries installed:

```bash
!pip install -q peft transformers datasets
```

## Setup

Start by defining the model and tokenizer, the dataset and the dataset columns to train on, some training hyperparameters, and the [`PromptTuningConfig`]. The [`PromptTuningConfig`] contains information about the task type, the text to initialize the prompt embedding, the number of virtual tokens, and the tokenizer to use:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup
from peft import get_peft_model, PromptTuningInit, PromptTuningConfig, TaskType
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm

device = "cuda"
model_name_or_path = "bigscience/bloomz-560m"
tokenizer_name_or_path = "bigscience/bloomz-560m"
peft_config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    prompt_tuning_init=PromptTuningInit.TEXT,
    num_virtual_tokens=8,
    prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
    tokenizer_name_or_path=model_name_or_path,
)

dataset_name = "twitter_complaints"
checkpoint_name = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}_v1.pt".replace(
    "/", "_"
)
text_column = "Tweet text"
label_column = "text_label"
max_length = 64
lr = 3e-2
num_epochs = 50
batch_size = 8
```

## Load dataset

For this guide, you'll load the `twitter_complaints` subset of the [RAFT](https://huggingface.co/datasets/ought/raft) dataset.
This subset contains tweets that are labeled either `complaint` or `no complaint`:

```py
dataset = load_dataset("ought/raft", dataset_name)
dataset["train"][0]
{"Tweet text": "@HMRCcustomers No this is my first job", "ID": 0, "Label": 2}
```

To make the `Label` column more readable, replace the `Label` value with the corresponding label text and store them in a `text_label` column. You can use the [`~datasets.Dataset.map`] function to apply this change over the entire dataset in one step:

```py
classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
dataset = dataset.map(
    lambda x: {"text_label": [classes[label] for label in x["Label"]]},
    batched=True,
    num_proc=1,
)
dataset["train"][0]
{"Tweet text": "@HMRCcustomers No this is my first job", "ID": 0, "Label": 2, "text_label": "no complaint"}
```

## Preprocess dataset

Next, you'll set up a tokenizer: configure the appropriate padding token to use for padding sequences, and determine the maximum length of the tokenized labels:

```py
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
if tokenizer.pad_token_id is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
print(target_max_length)
3
```

Create a `preprocess_function` to:

1. Tokenize the input text and labels.
2. For each example in a batch, pad the labels with the tokenizer's `pad_token_id`.
3. Concatenate the input text and labels into the `model_inputs`.
4. Create a separate attention mask for `labels` and `model_inputs`.
5. Loop through each example in the batch again to pad the input ids, labels, and attention mask to the `max_length` and convert them to PyTorch tensors.

```py
def preprocess_function(examples):
    batch_size = len(examples[text_column])
    inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
    targets = [str(x) for x in examples[label_column]]
    model_inputs = tokenizer(inputs)
    labels = tokenizer(targets)
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        label_input_ids = labels["input_ids"][i] + [tokenizer.pad_token_id]
        model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
        labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
        model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
    for i in range(batch_size):
        sample_input_ids = model_inputs["input_ids"][i]
        label_input_ids = labels["input_ids"][i]
        model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
            max_length - len(sample_input_ids)
        ) + sample_input_ids
        model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
            "attention_mask"
        ][i]
        labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
        model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
        model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
        labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs
```

Use the [`~datasets.Dataset.map`] function to apply the `preprocess_function` to the entire dataset.
You can remove the unprocessed columns since the model won't need them:

```py
processed_datasets = dataset.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)
```

Create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) from the `train` and `eval` datasets. Set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU.

```py
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["test"]

train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
```

## Train

You're almost ready to set up your model and start training!

Initialize a base model from [`~transformers.AutoModelForCausalLM`], and pass it and `peft_config` to the [`get_peft_model`] function to create a [`PeftModel`]. You can print the new [`PeftModel`]'s trainable parameters to see how much more efficient it is than training the full parameters of the original model!

```py
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
# print_trainable_parameters prints directly, so there is no need to wrap it in print()
model.print_trainable_parameters()
"trainable params: 8192 || all params: 559222784 || trainable%: 0.0014648902430985358"
```

Set up an optimizer and learning rate scheduler:

```py
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=(len(train_dataloader) * num_epochs),
)
```

Move the model to the GPU, then write a training loop to start training!

```py
model = model.to(device)

for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
```

## Share model

You can store and share your model on the Hub if you'd like. Log in to your Hugging Face account and enter your token when prompted:

```py
from huggingface_hub import notebook_login

notebook_login()
```

Use the [`~transformers.PreTrainedModel.push_to_hub`] function to upload your model to a model repository on the Hub:

```py
peft_model_id = "your-name/bloomz-560m_PROMPT_TUNING_CAUSAL_LM"
model.push_to_hub(peft_model_id, use_auth_token=True)
```

Once the model is uploaded, you'll see the model file size is only 33.5kB! 🤏

## Inference

Let's try the model on a sample input for inference.
If you look at the repository you uploaded the model to, you'll see an `adapter_config.json` file. Load this file into [`PeftConfig`] to specify the `peft_type` and `task_type`. Then you can load the prompt tuned model weights and configuration into [`~PeftModel.from_pretrained`] to create the [`PeftModel`]:

```py
from peft import PeftModel, PeftConfig

peft_model_id = "stevhliu/bloomz-560m_PROMPT_TUNING_CAUSAL_LM"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
```

Grab a tweet and tokenize it:

```py
inputs = tokenizer(
    f'{text_column} : {"@nationalgridus I have no water and the bill is current and paid. Can you do something about this?"} Label : ',
    return_tensors="pt",
)
```

Put the model on a GPU and *generate* the predicted label:

```py
model.to(device)

with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3
    )
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
[
    "Tweet text : @nationalgridus I have no water and the bill is current and paid. Can you do something about this? Label : complaint"
]
```
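The generated sequence repeats the prompt before the predicted label. If you only want the label itself, one simple approach (a small post-processing sketch, not part of the original guide) is to split the decoded text on the `Label : ` marker:

```py
# Keep only the text after the last "Label : " in the decoded generation
decoded = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
predicted_label = decoded.split("Label : ")[-1].strip()
print(predicted_label)
"complaint"
```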
hf_public_repos/peft/docs/source/task_guides/semantic_segmentation_lora.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Semantic segmentation using LoRA This guide demonstrates how to use LoRA, a low-rank approximation technique, to finetune a SegFormer model variant for semantic segmentation. By using LoRA from 🤗 PEFT, we can reduce the number of trainable parameters in the SegFormer model to only 14% of the original trainable parameters. LoRA achieves this reduction by adding low-rank "update matrices" to specific blocks of the model, such as the attention blocks. During fine-tuning, only these matrices are trained, while the original model parameters are left unchanged. At inference time, the update matrices are merged with the original model parameters to produce the final classification result. For more information on LoRA, please refer to the [original LoRA paper](https://arxiv.org/abs/2106.09685). ## Install dependencies Install the libraries required for model training: ```bash !pip install transformers accelerate evaluate datasets peft -q ``` ## Authenticate to share your model To share the finetuned model with the community at the end of the training, authenticate using your 🤗 token. You can obtain your token from your [account settings](https://huggingface.co/settings/token). ```python from huggingface_hub import notebook_login notebook_login() ``` ## Load a dataset To ensure that this example runs within a reasonable time frame, here we are limiting the number of instances from the training set of the [SceneParse150 dataset](https://huggingface.co/datasets/scene_parse_150) to 150. ```python from datasets import load_dataset ds = load_dataset("scene_parse_150", split="train[:150]") ``` Next, split the dataset into train and test sets. ```python ds = ds.train_test_split(test_size=0.1) train_ds = ds["train"] test_ds = ds["test"] ``` ## Prepare label maps Create a dictionary that maps a label id to a label class, which will be useful when setting up the model later: * `label2id`: maps the semantic classes of the dataset to integer ids. * `id2label`: maps integer ids back to the semantic classes. ```python import json from huggingface_hub import cached_download, hf_hub_url repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label) ``` ## Prepare datasets for training and evaluation Next, load the SegFormer image processor to prepare the images and annotations for the model. This dataset uses the zero-index as the background class, so make sure to set `do_reduce_labels=True` to subtract one from all labels since the background class is not among the 150 classes. 
```python from transformers import AutoImageProcessor checkpoint = "nvidia/mit-b0" image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True) ``` Add a function to apply data augmentation to the images, so that the model is more robust against overfitting. Here we use the [ColorJitter](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html) function from [torchvision](https://pytorch.org/vision/stable/index.html) to randomly change the color properties of an image. ```python from torchvision.transforms import ColorJitter jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) ``` Add a function to handle grayscale images and ensure that each input image has three color channels, regardless of whether it was originally grayscale or RGB. The function converts RGB images to array as is, and for grayscale images that have only one color channel, the function replicates the same channel three times using `np.tile()` before converting the image into an array. ```python import numpy as np def handle_grayscale_image(image): np_image = np.array(image) if np_image.ndim == 2: tiled_image = np.tile(np.expand_dims(np_image, -1), 3) return Image.fromarray(tiled_image) else: return Image.fromarray(np_image) ``` Finally, combine everything in two functions that you'll use to transform training and validation data. The two functions are similar except data augmentation is applied only to the training data. ```python from PIL import Image def train_transforms(example_batch): images = [jitter(handle_grayscale_image(x)) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = image_processor(images, labels) return inputs def val_transforms(example_batch): images = [handle_grayscale_image(x) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = image_processor(images, labels) return inputs ``` To apply the preprocessing functions over the entire dataset, use the 🤗 Datasets `set_transform` function: ```python train_ds.set_transform(train_transforms) test_ds.set_transform(val_transforms) ``` ## Create evaluation function Including a metric during training is helpful for evaluating your model's performance. You can load an evaluation method with the [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) library. 
For this task, use the [mean Intersection over Union (IoU)](https://huggingface.co/spaces/evaluate-metric/mean_iou) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):

```python
import torch
from torch import nn
import evaluate

metric = evaluate.load("mean_iou")


def compute_metrics(eval_pred):
    with torch.no_grad():
        logits, labels = eval_pred
        logits_tensor = torch.from_numpy(logits)
        logits_tensor = nn.functional.interpolate(
            logits_tensor,
            size=labels.shape[-2:],
            mode="bilinear",
            align_corners=False,
        ).argmax(dim=1)

        pred_labels = logits_tensor.detach().cpu().numpy()
        # currently using _compute instead of compute
        # see this issue for more info: https://github.com/huggingface/evaluate/pull/328#issuecomment-1286866576
        metrics = metric._compute(
            predictions=pred_labels,
            references=labels,
            num_labels=len(id2label),
            ignore_index=0,
            reduce_labels=image_processor.do_reduce_labels,
        )

        per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
        per_category_iou = metrics.pop("per_category_iou").tolist()

        metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
        metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})

    return metrics
```

## Load a base model

Before loading a base model, let's define a helper function to check the total number of parameters a model has, as well as how many of them are trainable.

```python
def print_trainable_parameters(model):
    """
    Prints the number of trainable parameters in the model.
    """
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )
```

Choose a base model checkpoint. For this example, we use the [SegFormer B0 variant](https://huggingface.co/nvidia/mit-b0). In addition to the checkpoint, pass the `label2id` and `id2label` dictionaries to let the `AutoModelForSemanticSegmentation` class know that we're interested in a custom base model where the decoder head should be randomly initialized using the classes from the custom dataset.

```python
from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer

model = AutoModelForSemanticSegmentation.from_pretrained(
    checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
print_trainable_parameters(model)
```

At this point you can check with the `print_trainable_parameters` helper function that 100% of the parameters in the base model (aka `model`) are trainable.

## Wrap the base model as a PeftModel for LoRA training

To leverage the LoRA method, you need to wrap the base model as a `PeftModel`. This involves two steps:

1. Defining the LoRA configuration with `LoraConfig`
2. Wrapping the original `model` with `get_peft_model()` using the config defined in the step above.

```python
from peft import LoraConfig, get_peft_model

config = LoraConfig(
    r=32,
    lora_alpha=32,
    target_modules=["query", "value"],
    lora_dropout=0.1,
    bias="lora_only",
    modules_to_save=["decode_head"],
)
lora_model = get_peft_model(model, config)
print_trainable_parameters(lora_model)
```

Let's review the `LoraConfig`. To enable the LoRA technique, we must define the target modules within `LoraConfig` so that `PeftModel` can update the necessary matrices.
Specifically, we want to target the `query` and `value` matrices in the attention blocks of the base model. These matrices are identified by their respective names, "query" and "value". Therefore, we should specify these names in the `target_modules` argument of `LoraConfig`.

After we wrap our base model `model` with `PeftModel` along with the config, we get a new model where only the LoRA parameters are trainable (the so-called "update matrices") while the pre-trained parameters are kept frozen. The frozen parameters also include the randomly initialized classifier parameters, which is not what we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`.

Let's review the rest of the parameters:

- `r`: The dimension used by the LoRA update matrices.
- `alpha`: Scaling factor.
- `bias`: Specifies if the `bias` parameters should be trained. `None` denotes none of the `bias` parameters will be trained.

When all is configured, and the base model is wrapped, the `print_trainable_parameters` helper function lets us explore the number of trainable parameters. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect to see a lower number of trainable parameters from the `lora_model` in comparison to the original `model`, which is indeed the case here.

You can also manually verify what modules are trainable in the `lora_model`.

```python
for name, param in lora_model.named_parameters():
    if param.requires_grad:
        print(name, param.shape)
```

This confirms that only the LoRA parameters appended to the attention blocks and the `decode_head` parameters are trainable.

## Train the model

Start by defining your training hyperparameters in `TrainingArguments`. You can change the values of most parameters however you prefer. Make sure to set `remove_unused_columns=False`, otherwise the image column will be dropped, and it's required here. The only other required parameter is `output_dir` which specifies where to save your model. At the end of each epoch, the `Trainer` will evaluate the IoU metric and save the training checkpoint.

Note that this example is meant to walk you through the workflow when using PEFT for semantic segmentation. We didn't perform extensive hyperparameter tuning to achieve optimal results.
```python
model_name = checkpoint.split("/")[-1]

training_args = TrainingArguments(
    output_dir=f"{model_name}-scene-parse-150-lora",
    learning_rate=5e-4,
    num_train_epochs=50,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=2,
    save_total_limit=3,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    logging_steps=5,
    remove_unused_columns=False,
    push_to_hub=True,
    label_names=["labels"],
)
```

Pass the training arguments to `Trainer` along with the model, dataset, and `compute_metrics` function. Call `train()` to finetune your model.

```python
trainer = Trainer(
    model=lora_model,
    args=training_args,
    train_dataset=train_ds,
    eval_dataset=test_ds,
    compute_metrics=compute_metrics,
)

trainer.train()
```

## Save the model and run inference

Use the `save_pretrained()` method of the `lora_model` to save the *LoRA-only parameters* locally. Alternatively, use the `push_to_hub()` method to upload these parameters directly to the Hugging Face Hub (as shown in the [Image classification using LoRA](image_classification_lora) task guide).

```python
model_id = "segformer-scene-parse-150-lora"
lora_model.save_pretrained(model_id)
```

We can see that the LoRA-only parameters are just **2.2 MB in size**! This greatly improves the portability when using very large models.

```bash
!ls -lh {model_id}
total 2.2M
-rw-r--r-- 1 root root 369 Feb 8 03:09 adapter_config.json
-rw-r--r-- 1 root root 2.2M Feb 8 03:09 adapter_model.bin
```

Let's now prepare an `inference_model` and run inference.

```python
# PeftModel is needed here alongside PeftConfig to load the adapter
from peft import PeftConfig, PeftModel

config = PeftConfig.from_pretrained(model_id)
model = AutoModelForSemanticSegmentation.from_pretrained(
    checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
inference_model = PeftModel.from_pretrained(model, model_id)
```

Get an image:

```python
import requests

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png"
image = Image.open(requests.get(url, stream=True).raw)
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="photo of a room"/>
</div>

Preprocess the image to prepare for inference.

```python
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
```

Run inference with the encoded image.

```python
with torch.no_grad():
    outputs = inference_model(pixel_values=encoding.pixel_values)
    logits = outputs.logits

upsampled_logits = nn.functional.interpolate(
    logits,
    size=image.size[::-1],
    mode="bilinear",
    align_corners=False,
)

pred_seg = upsampled_logits.argmax(dim=1)[0]
```

Next, visualize the results. We need a color palette for this; here, we use `ade_palette()`. As it is a long array, we don't include it in this guide; please copy it from [the TensorFlow Model Garden repository](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51).
```python
import matplotlib.pyplot as plt

color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
palette = np.array(ade_palette())

for label, color in enumerate(palette):
    color_seg[pred_seg == label, :] = color
color_seg = color_seg[..., ::-1]  # convert to BGR

img = np.array(image) * 0.5 + color_seg * 0.5  # plot the image with the segmentation map
img = img.astype(np.uint8)

plt.figure(figsize=(15, 10))
plt.imshow(img)
plt.show()
```

As you can see, the results are far from perfect. However, this example is designed to illustrate the end-to-end workflow of fine-tuning a semantic segmentation model with the LoRA technique, and is not aiming to achieve state-of-the-art results. The results you see here are the same as you would get if you performed full fine-tuning on the same setup (same model variant, same dataset, same training schedule, etc.), except LoRA allows you to achieve them with a fraction of the total trainable parameters and in less time.

If you wish to use this example and improve the results, here are some things that you can try:

* Increase the number of training samples.
* Try a larger SegFormer model variant (explore available model variants on the [Hugging Face Hub](https://huggingface.co/models?search=segformer)).
* Try different values for the arguments available in `LoraConfig` (see the sketch after this list).
* Tune the learning rate and batch size.
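For the last two suggestions, the sketch below shows one way to start experimenting with a different `LoraConfig`. The specific values are illustrative assumptions, not tuned settings from this guide, and the sketch assumes you reload a fresh base model so the earlier LoRA wrapping doesn't interfere:

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForSemanticSegmentation

# Reload a fresh base model before wrapping it with a new LoRA config
base_model = AutoModelForSemanticSegmentation.from_pretrained(
    checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)

# Illustrative variation: smaller rank, higher dropout, no bias training
alt_config = LoraConfig(
    r=8,
    lora_alpha=16,
    target_modules=["query", "value"],
    lora_dropout=0.2,
    bias="none",
    modules_to_save=["decode_head"],
)
alt_lora_model = get_peft_model(base_model, alt_config)
print_trainable_parameters(alt_lora_model)
```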
hf_public_repos/peft/docs/source/task_guides/semantic-similarity-lora.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# LoRA for semantic similarity tasks

Low-Rank Adaptation (LoRA) is a reparametrization method that aims to reduce the number of trainable parameters with low-rank representations. The weight matrix is broken down into low-rank matrices that are trained and updated. All the pretrained model parameters remain frozen. After training, the low-rank matrices are added back to the original weights. This makes it more efficient to store and train a LoRA model because there are significantly fewer parameters.

<Tip>

💡 Read [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) to learn more about LoRA.

</Tip>

In this guide, we'll be using a LoRA [script](https://github.com/huggingface/peft/tree/main/examples/feature_extraction) to fine-tune an [`intfloat/e5-large-v2`](https://huggingface.co/intfloat/e5-large-v2) model on the [`smangrul/amazon_esci`](https://huggingface.co/datasets/smangrul/amazon_esci) dataset for semantic similarity tasks. Feel free to explore the script to learn how things work in greater detail!

## Setup

Start by installing 🤗 PEFT from [source](https://github.com/huggingface/peft), and then navigate to the directory containing the training scripts for this task:

```bash
cd peft/examples/feature_extraction
```

Install all the required libraries with:

```bash
pip install -r requirements.txt
```

Next, import all the necessary libraries:

- 🤗 Transformers for loading the `intfloat/e5-large-v2` model and tokenizer
- 🤗 Accelerate for the training loop
- 🤗 Datasets for loading and preparing the `smangrul/amazon_esci` dataset for training and inference
- 🤗 Evaluate for evaluating the model's performance
- 🤗 PEFT for setting up the LoRA configuration and creating the PEFT model
- 🤗 huggingface_hub for uploading the trained model to HF hub
- hnswlib for creating the search index and doing fast approximate nearest neighbor search

<Tip>

It is assumed that PyTorch with CUDA support is already installed.

</Tip>

## Train

Launch the training script with `accelerate launch` and pass your hyperparameters along with the `--use_peft` argument to enable LoRA.

This guide uses the following [`LoraConfig`]:

```py
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    bias="none",
    task_type=TaskType.FEATURE_EXTRACTION,
    target_modules=["key", "query", "value"],
)
```

Here's what a full set of script arguments may look like when running in Colab on a V100 GPU with standard RAM:

```bash
accelerate launch \
  --mixed_precision="fp16" \
  peft_lora_embedding_semantic_search.py \
  --dataset_name="smangrul/amazon_esci" \
  --max_length=70 --model_name_or_path="intfloat/e5-large-v2" \
  --per_device_train_batch_size=64 \
  --per_device_eval_batch_size=128 \
  --learning_rate=5e-4 \
  --weight_decay=0.0 \
  --num_train_epochs 3 \
  --gradient_accumulation_steps=1 \
  --output_dir="results/peft_lora_e5_ecommerce_semantic_search_colab" \
  --seed=42 \
  --push_to_hub \
  --hub_model_id="smangrul/peft_lora_e5_ecommerce_semantic_search_colab" \
  --with_tracking \
  --report_to="wandb" \
  --use_peft \
  --checkpointing_steps "epoch"
```

## Dataset for semantic similarity

The dataset we'll be using is a small subset of the [esci-data](https://github.com/amazon-science/esci-data.git) dataset (it can be found on Hub at [smangrul/amazon_esci](https://huggingface.co/datasets/smangrul/amazon_esci)).
Each sample contains a tuple of `(query, product_title, relevance_label)` where `relevance_label` is `1` if the product matches the intent of the `query`, otherwise it is `0`. Our task is to build an embedding model that can retrieve semantically similar products given a product query. This is usually the first stage in building a product search engine to retrieve all the potentially relevant products of a given query. Typically, this involves using Bi-Encoder models to cross-join the query and millions of products which could blow up quickly. Instead, you can use a Transformer model to retrieve the top K nearest similar products for a given query by embedding the query and products in the same latent embedding space. The millions of products are embedded offline to create a search index. At run time, only the query is embedded by the model, and products are retrieved from the search index with a fast approximate nearest neighbor search library such as [FAISS](https://github.com/facebookresearch/faiss) or [HNSWlib](https://github.com/nmslib/hnswlib). The next stage involves reranking the retrieved list of products to return the most relevant ones; this stage can utilize cross-encoder based models as the cross-join between the query and a limited set of retrieved products. The diagram below from [awesome-semantic-search](https://github.com/rom1504/awesome-semantic-search) outlines a rough semantic search pipeline: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/semantic_search_pipeline.png" alt="Semantic Search Pipeline"/> </div> For this task guide, we will explore the first stage of training an embedding model to predict semantically similar products given a product query. ## Training script deep dive We finetune [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) which tops the [MTEB benchmark](https://huggingface.co/spaces/mteb/leaderboard) using PEFT-LoRA. 
[`AutoModelForSentenceEmbedding`] returns the query and product embeddings, and the `mean_pooling` function pools them across the sequence dimension and normalizes them: ```py class AutoModelForSentenceEmbedding(nn.Module): def __init__(self, model_name, tokenizer, normalize=True): super(AutoModelForSentenceEmbedding, self).__init__() self.model = AutoModel.from_pretrained(model_name) self.normalize = normalize self.tokenizer = tokenizer def forward(self, **kwargs): model_output = self.model(**kwargs) embeddings = self.mean_pooling(model_output, kwargs["attention_mask"]) if self.normalize: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings def mean_pooling(self, model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_cosine_embeddings(query_embs, product_embs): return torch.sum(query_embs * product_embs, axis=1) def get_loss(cosine_score, labels): return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0))) ``` The `get_cosine_embeddings` function computes the cosine similarity and the `get_loss` function computes the loss. The loss enables the model to learn that a cosine score of `1` for query and product pairs is relevant, and a cosine score of `0` or below is irrelevant. Define the [`PeftConfig`] with your LoRA hyperparameters, and create a [`PeftModel`]. We use 🤗 Accelerate for handling all device management, mixed precision training, gradient accumulation, WandB tracking, and saving/loading utilities. ## Results The table below compares the training time, the batch size that could be fit in Colab, and the best ROC-AUC scores between a PEFT model and a fully fine-tuned model: | Training Type | Training time per epoch (Hrs) | Batch Size that fits | ROC-AUC score (higher is better) | | ----------------- | ------------- | ---------- | -------- | | Pre-Trained e5-large-v2 | - | - | 0.68 | | PEFT | 1.73 | 64 | 0.787 | | Full Fine-Tuning | 2.33 | 32 | 0.7969 | The PEFT-LoRA model trains **1.35X** faster and can fit **2X** batch size compared to the fully fine-tuned model, and the performance of PEFT-LoRA is comparable to the fully fine-tuned model with a relative drop of **-1.24%** in ROC-AUC. This gap can probably be closed with bigger models as mentioned in [The Power of Scale for Parameter-Efficient Prompt Tuning ](https://huggingface.co/papers/2104.08691). ## Inference Let's go! Now we have the model, we need to create a search index of all the products in our catalog. Please refer to `peft_lora_embedding_semantic_similarity_inference.ipynb` for the complete inference code. 1. 
Get a list of ids to products which we can call `ids_to_products_dict`: ```bash {0: 'RamPro 10" All Purpose Utility Air Tires/Wheels with a 5/8" Diameter Hole with Double Sealed Bearings (Pack of 2)', 1: 'MaxAuto 2-Pack 13x5.00-6 2PLY Turf Mower Tractor Tire with Yellow Rim, (3" Centered Hub, 3/4" Bushings )', 2: 'NEIKO 20601A 14.5 inch Steel Tire Spoon Lever Iron Tool Kit | Professional Tire Changing Tool for Motorcycle, Dirt Bike, Lawn Mower | 3 pcs Tire Spoons | 3 Rim Protector | Valve Tool | 6 Valve Cores', 3: '2PK 13x5.00-6 13x5.00x6 13x5x6 13x5-6 2PLY Turf Mower Tractor Tire with Gray Rim', 4: '(Set of 2) 15x6.00-6 Husqvarna/Poulan Tire Wheel Assy .75" Bearing', 5: 'MaxAuto 2 Pcs 16x6.50-8 Lawn Mower Tire for Garden Tractors Ridings, 4PR, Tubeless', 6: 'Dr.Roc Tire Spoon Lever Dirt Bike Lawn Mower Motorcycle Tire Changing Tools with Durable Bag 3 Tire Irons 2 Rim Protectors 1 Valve Stems Set TR412 TR413', 7: 'MARASTAR 21446-2PK 15x6.00-6" Front Tire Assembly Replacement-Craftsman Mower, Pack of 2', 8: '15x6.00-6" Front Tire Assembly Replacement for 100 and 300 Series John Deere Riding Mowers - 2 pack', 9: 'Honda HRR Wheel Kit (2 Front 44710-VL0-L02ZB, 2 Back 42710-VE2-M02ZE)', 10: 'Honda 42710-VE2-M02ZE (Replaces 42710-VE2-M01ZE) Lawn Mower Rear Wheel Set of 2' ... ``` 2. Use the trained [smangrul/peft_lora_e5_ecommerce_semantic_search_colab](https://huggingface.co/smangrul/peft_lora_e5_ecommerce_semantic_search_colab) model to get the product embeddings: ```py # base model model = AutoModelForSentenceEmbedding(model_name_or_path, tokenizer) # peft config and wrapping model = PeftModel.from_pretrained(model, peft_model_id) device = "cuda" model.to(device) model.eval() model = model.merge_and_unload() import numpy as np num_products= len(dataset) d = 1024 product_embeddings_array = np.zeros((num_products, d)) for step, batch in enumerate(tqdm(dataloader)): with torch.no_grad(): with torch.amp.autocast(dtype=torch.bfloat16, device_type="cuda"): product_embs = model(**{k:v.to(device) for k, v in batch.items()}).detach().float().cpu() start_index = step*batch_size end_index = start_index+batch_size if (start_index+batch_size) < num_products else num_products product_embeddings_array[start_index:end_index] = product_embs del product_embs, batch ``` 3. Create a search index using HNSWlib: ```py def construct_search_index(dim, num_elements, data): # Declaring index search_index = hnswlib.Index(space = 'ip', dim = dim) # possible options are l2, cosine or ip # Initializing index - the maximum number of elements should be known beforehand search_index.init_index(max_elements = num_elements, ef_construction = 200, M = 100) # Element insertion (can be called several times): ids = np.arange(num_elements) search_index.add_items(data, ids) return search_index product_search_index = construct_search_index(d, num_products, product_embeddings_array) ``` 4. 
Get the query embeddings and nearest neighbors: ```py def get_query_embeddings(query, model, tokenizer, device): inputs = tokenizer(query, padding="max_length", max_length=70, truncation=True, return_tensors="pt") model.eval() with torch.no_grad(): query_embs = model(**{k:v.to(device) for k, v in inputs.items()}).detach().cpu() return query_embs[0] def get_nearest_neighbours(k, search_index, query_embeddings, ids_to_products_dict, threshold=0.7): # Controlling the recall by setting ef: search_index.set_ef(100) # ef should always be > k # Query dataset, k - number of the closest elements (returns 2 numpy arrays) labels, distances = search_index.knn_query(query_embeddings, k = k) return [(ids_to_products_dict[label], (1-distance)) for label, distance in zip(labels[0], distances[0]) if (1-distance)>=threshold] ``` 5. Let's test it out with the query `deep learning books`: ```py query = "deep learning books" k = 10 query_embeddings = get_query_embeddings(query, model, tokenizer, device) search_results = get_nearest_neighbours(k, product_search_index, query_embeddings, ids_to_products_dict, threshold=0.7) print(f"{query=}") for product, cosine_sim_score in search_results: print(f"cosine_sim_score={round(cosine_sim_score,2)} {product=}") ``` Output: ```bash query='deep learning books' cosine_sim_score=0.95 product='Deep Learning (The MIT Press Essential Knowledge series)' cosine_sim_score=0.93 product='Practical Deep Learning: A Python-Based Introduction' cosine_sim_score=0.9 product='Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems' cosine_sim_score=0.9 product='Machine Learning: A Hands-On, Project-Based Introduction to Machine Learning for Absolute Beginners: Mastering Engineering ML Systems using Scikit-Learn and TensorFlow' cosine_sim_score=0.9 product='Mastering Machine Learning on AWS: Advanced machine learning in Python using SageMaker, Apache Spark, and TensorFlow' cosine_sim_score=0.9 product='The Hundred-Page Machine Learning Book' cosine_sim_score=0.89 product='Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems' cosine_sim_score=0.89 product='Machine Learning: A Journey from Beginner to Advanced Including Deep Learning, Scikit-learn and Tensorflow' cosine_sim_score=0.88 product='Mastering Machine Learning with scikit-learn' cosine_sim_score=0.88 product='Mastering Machine Learning with scikit-learn - Second Edition: Apply effective learning algorithms to real-world problems using scikit-learn' ``` Books on deep learning and machine learning are retrieved even though `machine learning` wasn't included in the query. This means the model has learned that these books are semantically relevant to the query based on the purchase behavior of customers on Amazon. The next steps would ideally involve using ONNX/TensorRT to optimize the model and using a Triton server to host it. Check out 🤗 [Optimum](https://huggingface.co/docs/optimum/index) for related optimizations for efficient serving!
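Finally, if you want to convince yourself of how the `get_loss` objective from the training script behaves, here is a tiny self-contained check with toy values (not data from the actual script):

```py
import torch

def get_cosine_embeddings(query_embs, product_embs):
    return torch.sum(query_embs * product_embs, axis=1)

def get_loss(cosine_score, labels):
    return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0)))

# toy cosine scores and relevance labels
cosine_score = torch.tensor([1.0, 0.0, 0.9, -0.5])
labels = torch.tensor([1.0, 1.0, 0.0, 0.0])

# per-pair penalties: 0.0 (relevant pair scored 1), 1.0 (relevant pair scored 0),
# 0.81 (irrelevant pair scored 0.9), 0.0 (irrelevant pairs at or below 0 are not penalized)
print(get_loss(cosine_score, labels))  # tensor(0.4525)
```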
hf_public_repos/peft/docs/source/task_guides/ptuning-seq-classification.md
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # P-tuning for sequence classification It is challenging to finetune large language models for downstream tasks because they have so many parameters. To work around this, you can use *prompts* to steer the model toward a particular downstream task without fully finetuning a model. Typically, these prompts are handcrafted, which may be impractical because you need very large validation sets to find the best prompts. *P-tuning* is a method for automatically searching and optimizing for better prompts in a continuous space. <Tip> 💡 Read [GPT Understands, Too](https://arxiv.org/abs/2103.10385) to learn more about p-tuning. </Tip> This guide will show you how to train a [`roberta-large`](https://huggingface.co/roberta-large) model (but you can also use any of the GPT, OPT, or BLOOM models) with p-tuning on the `mrpc` configuration of the [GLUE](https://huggingface.co/datasets/glue) benchmark. Before you begin, make sure you have all the necessary libraries installed: ```bash !pip install -q peft transformers datasets evaluate ``` ## Setup To get started, import 🤗 Transformers to create the base model, 🤗 Datasets to load a dataset, 🤗 Evaluate to load an evaluation metric, and 🤗 PEFT to create a [`PeftModel`] and setup the configuration for p-tuning. Define the model, dataset, and some basic training hyperparameters: ```py from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, TrainingArguments, Trainer, ) from peft import ( get_peft_config, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict, PeftType, PromptEncoderConfig, ) from datasets import load_dataset import evaluate import torch model_name_or_path = "roberta-large" task = "mrpc" num_epochs = 20 lr = 1e-3 batch_size = 32 ``` ## Load dataset and metric Next, load the `mrpc` configuration - a corpus of sentence pairs labeled according to whether they're semantically equivalent or not - from the [GLUE](https://huggingface.co/datasets/glue) benchmark: ```py dataset = load_dataset("glue", task) dataset["train"][0] { "sentence1": 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .', "sentence2": 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .', "label": 1, "idx": 0, } ``` From 🤗 Evaluate, load a metric for evaluating the model's performance. The evaluation module returns the accuracy and F1 scores associated with this specific task. ```py metric = evaluate.load("glue", task) ``` Now you can use the `metric` to write a function that computes the accuracy and F1 scores. The `compute_metric` function calculates the scores from the model predictions and labels: ```py import numpy as np def compute_metrics(eval_pred): predictions, labels = eval_pred predictions = np.argmax(predictions, axis=1) return metric.compute(predictions=predictions, references=labels) ``` ## Preprocess dataset Initialize the tokenizer and configure the padding token to use. If you're using a GPT, OPT, or BLOOM model, you should set the `padding_side` to the left; otherwise it'll be set to the right. Tokenize the sentence pairs and truncate them to the maximum length. 
```py
if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
    padding_side = "left"
else:
    padding_side = "right"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id


def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
    return outputs
```

Use [`~datasets.Dataset.map`] to apply the `tokenize_function` to the dataset, and remove the unprocessed columns because the model won't need those. You should also rename the `label` column to `labels` because that is the name models in the 🤗 Transformers library expect for the labels.

```py
tokenized_datasets = dataset.map(
    tokenize_function,
    batched=True,
    remove_columns=["idx", "sentence1", "sentence2"],
)

tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
```

Create a collator function with [`~transformers.DataCollatorWithPadding`] to pad the examples in the batches to the `longest` sequence in the batch:

```py
data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding="longest")
```

## Train

P-tuning uses a prompt encoder to optimize the prompt parameters, so you'll need to initialize the [`PromptEncoderConfig`] with several arguments:

- `task_type`: the type of task you're training on, in this case it is sequence classification or `SEQ_CLS`
- `num_virtual_tokens`: the number of virtual tokens to use, or in other words, the prompt
- `encoder_hidden_size`: the hidden size of the encoder used to optimize the prompt parameters

```py
peft_config = PromptEncoderConfig(task_type="SEQ_CLS", num_virtual_tokens=20, encoder_hidden_size=128)
```

Create the base `roberta-large` model from [`~transformers.AutoModelForSequenceClassification`], and then wrap the base model and `peft_config` with [`get_peft_model`] to create a [`PeftModel`]. If you're curious to see how many parameters you're actually training compared to training on all the model parameters, you can print it out with [`~peft.PeftModel.print_trainable_parameters`]:

```py
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
"trainable params: 1351938 || all params: 355662082 || trainable%: 0.38011867680626127"
```

From the 🤗 Transformers library, set up the [`~transformers.TrainingArguments`] class with where you want to save the model to, the training hyperparameters, how to evaluate the model, and when to save the checkpoints, using the hyperparameters you defined in the Setup section:

```py
training_args = TrainingArguments(
    output_dir="your-name/roberta-large-peft-p-tuning",
    learning_rate=lr,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=num_epochs,
    weight_decay=0.01,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
)
```

Then pass the model, `TrainingArguments`, datasets, tokenizer, data collator, and evaluation function to the [`~transformers.Trainer`] class, which'll handle the entire training loop for you. Once you're ready, call [`~transformers.Trainer.train`] to start training!
```py
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    eval_dataset=tokenized_datasets["test"],
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)

trainer.train()
```

## Share model

You can store and share your model on the Hub if you'd like. Log in to your Hugging Face account and enter your token when prompted:

```py
from huggingface_hub import notebook_login

notebook_login()
```

Upload the model to a specific model repository on the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] function:

```py
model.push_to_hub("your-name/roberta-large-peft-p-tuning", use_auth_token=True)
```

## Inference

Once the model has been uploaded to the Hub, anyone can easily use it for inference. Load the configuration and model:

```py
import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer

peft_model_id = "smangrul/roberta-large-peft-p-tuning"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(inference_model, peft_model_id)
```

Get some text and tokenize it:

```py
classes = ["not equivalent", "equivalent"]

sentence1 = "Coast redwood trees are the tallest trees on the planet and can grow over 300 feet tall."
sentence2 = "The coast redwood trees, which can attain a height of over 300 feet, are the tallest trees on earth."

inputs = tokenizer(sentence1, sentence2, truncation=True, padding="longest", return_tensors="pt")
```

Pass the inputs to the model to classify the sentences:

```py
with torch.no_grad():
    outputs = model(**inputs).logits
    print(outputs)

paraphrased_text = torch.softmax(outputs, dim=1).tolist()[0]
for i in range(len(classes)):
    print(f"{classes[i]}: {int(round(paraphrased_text[i] * 100))}%")
"not equivalent: 4%"
"equivalent: 96%"
```
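If you'd rather keep the adapter local instead of (or in addition to) pushing it to the Hub, the same loading flow works with a directory path. Here's a minimal sketch, assuming the trained `model` from above; the directory name is just an illustrative choice:

```py
# Save only the p-tuning prompt encoder parameters (a few MB) to a local directory
model.save_pretrained("roberta-large-peft-p-tuning-local")

# Reload them onto a freshly instantiated base model
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification

config = PeftConfig.from_pretrained("roberta-large-peft-p-tuning-local")
base_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base_model, "roberta-large-peft-p-tuning-local")
```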
hf_public_repos/peft/docs/source/task_guides/seq2seq-prefix-tuning.md
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. -->

# Prefix tuning for conditional generation

[[open-in-colab]]

Prefix tuning is an additive method where only a sequence of continuous task-specific vectors is attached to the beginning of the input, or *prefix*. Only the prefix parameters are optimized and added to the hidden states in every layer of the model. The tokens of the input sequence can still attend to the prefix as *virtual tokens*. As a result, prefix tuning stores 1000x fewer parameters than a fully finetuned model, which means you can use one large language model for many tasks.

<Tip>

💡 Read [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://arxiv.org/abs/2101.00190) to learn more about prefix tuning.

</Tip>

This guide will show you how to apply prefix tuning to train a [`t5-large`](https://huggingface.co/t5-large) model on the `sentences_allagree` subset of the [financial_phrasebank](https://huggingface.co/datasets/financial_phrasebank) dataset.

Before you begin, make sure you have all the necessary libraries installed:

```bash
!pip install -q peft transformers datasets
```

## Setup

Start by defining the model and tokenizer, text and label columns, and some hyperparameters so it'll be easier to start training later. Set the environment variable `TOKENIZERS_PARALLELISM` to `false` to disable the fast Rust-based tokenizer, which processes data in parallel by default, so you can use multiprocessing in Python.

```py
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, default_data_collator, get_linear_schedule_with_warmup
from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, PrefixTuningConfig, TaskType
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
import torch
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
device = "cuda"
model_name_or_path = "t5-large"
tokenizer_name_or_path = "t5-large"

text_column = "sentence"
label_column = "text_label"
max_length = 128
lr = 1e-2
num_epochs = 5
batch_size = 8
```

## Load dataset

For this guide, you'll train on the `sentences_allagree` subset of the [`financial_phrasebank`](https://huggingface.co/datasets/financial_phrasebank) dataset. This dataset contains financial news categorized by sentiment.

Use the 🤗 [Datasets](https://huggingface.co/docs/datasets/index) [`~datasets.Dataset.train_test_split`] function to create a training and validation split and convert the `label` value to the more readable `text_label`.
All of the changes can be applied with the [`~datasets.Dataset.map`] function: ```py from datasets import load_dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) dataset["train"][0] {"sentence": "Profit before taxes was EUR 4.0 mn , down from EUR 4.9 mn .", "label": 0, "text_label": "negative"} ``` ## Preprocess dataset Initialize a tokenizer, and create a function to pad and truncate the `model_inputs` and `labels`: ```py tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs ``` Use the [`~datasets.Dataset.map`] function to apply the `preprocess_function` to the dataset. You can remove the unprocessed columns since the model doesn't need them anymore: ```py processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) ``` Create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) from the `train` and `eval` datasets. Set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU. ```py train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) ``` ## Train model Now you can setup your model and make sure it is ready for training. Specify the task in [`PrefixTuningConfig`], create the base `t5-large` model from [`~transformers.AutoModelForSeq2SeqLM`], and then wrap the model and configuration in a [`PeftModel`]. Feel free to print the [`PeftModel`]'s parameters and compare it to fully training all the model parameters to see how much more efficient it is! ```py peft_config = PrefixTuningConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, num_virtual_tokens=20) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 983040 || all params: 738651136 || trainable%: 0.13308583065659835" ``` Setup the optimizer and learning rate scheduler: ```py optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) ``` Move the model to the GPU, and then write a training loop to begin! 
```py
model = model.to(device)

for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
```

Let's see how well the model performs on the validation set:

```py
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")
"accuracy=97.3568281938326 % on the evaluation dataset"
"eval_preds[:10]=['neutral', 'positive', 'neutral', 'positive', 'neutral', 'negative', 'negative', 'neutral', 'neutral', 'neutral']"
"dataset['validation']['text_label'][:10]=['neutral', 'positive', 'neutral', 'positive', 'neutral', 'negative', 'negative', 'neutral', 'neutral', 'neutral']"
```

97% accuracy in just a few minutes; pretty good!

## Share model

You can store and share your model on the Hub if you'd like. Log in to your Hugging Face account and enter your token when prompted:

```py
from huggingface_hub import notebook_login

notebook_login()
```

Upload the model to a specific model repository on the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] function:

```py
peft_model_id = "your-name/t5-large_PREFIX_TUNING_SEQ2SEQ"
model.push_to_hub(peft_model_id, use_auth_token=True)
```

If you check the model file size in the repository, you'll see that it is only 3.93MB! 🤏

## Inference

Once the model has been uploaded to the Hub, anyone can easily use it for inference. Load the configuration and model:

```py
from peft import PeftModel, PeftConfig

peft_model_id = "stevhliu/t5-large_PREFIX_TUNING_SEQ2SEQ"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
```

Get and tokenize some text about financial news:

```py
inputs = tokenizer(
    "The Lithuanian beer market made up 14.41 million liters in January , a rise of 0.8 percent from the year-earlier figure , the Lithuanian Brewers ' Association reporting citing the results from its members .",
    return_tensors="pt",
)
```

Put the model on a GPU and *generate* the predicted text sentiment:

```py
model.to(device)

with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
["positive"]
```
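Note that [`~PeftModel.from_pretrained`] loads the adapter in inference mode by default, with the prefix parameters frozen. If you want to continue training from a checkpoint like this one instead, you can pass `is_trainable=True` when loading; a minimal sketch:

```py
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSeq2SeqLM

peft_model_id = "stevhliu/t5-large_PREFIX_TUNING_SEQ2SEQ"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
# is_trainable=True keeps the prefix parameters unfrozen so training can resume
model = PeftModel.from_pretrained(model, peft_model_id, is_trainable=True)
```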
hf_public_repos/peft/docs/source/task_guides/image_classification_lora.md
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Image classification using LoRA This guide demonstrates how to use LoRA, a low-rank approximation technique, to fine-tune an image classification model. By using LoRA from 🤗 PEFT, we can reduce the number of trainable parameters in the model to only 0.77% of the original. LoRA achieves this reduction by adding low-rank "update matrices" to specific blocks of the model, such as the attention blocks. During fine-tuning, only these matrices are trained, while the original model parameters are left unchanged. At inference time, the update matrices are merged with the original model parameters to produce the final classification result. For more information on LoRA, please refer to the [original LoRA paper](https://arxiv.org/abs/2106.09685). ## Install dependencies Install the libraries required for model training: ```bash !pip install transformers accelerate evaluate datasets peft -q ``` Check the versions of all required libraries to make sure you are up to date: ```python import transformers import accelerate import peft print(f"Transformers version: {transformers.__version__}") print(f"Accelerate version: {accelerate.__version__}") print(f"PEFT version: {peft.__version__}") "Transformers version: 4.27.4" "Accelerate version: 0.18.0" "PEFT version: 0.2.0" ``` ## Authenticate to share your model To share the fine-tuned model at the end of the training with the community, authenticate using your 🤗 token. You can obtain your token from your [account settings](https://huggingface.co/settings/token). ```python from huggingface_hub import notebook_login notebook_login() ``` ## Select a model checkpoint to fine-tune Choose a model checkpoint from any of the model architectures supported for [image classification](https://huggingface.co/models?pipeline_tag=image-classification&sort=downloads). When in doubt, refer to the [image classification task guide](https://huggingface.co/docs/transformers/v4.27.2/en/tasks/image_classification) in 🤗 Transformers documentation. ```python model_checkpoint = "google/vit-base-patch16-224-in21k" ``` ## Load a dataset To keep this example's runtime short, let's only load the first 5000 instances from the training set of the [Food-101 dataset](https://huggingface.co/datasets/food101): ```python from datasets import load_dataset dataset = load_dataset("food101", split="train[:5000]") ``` ## Dataset preparation To prepare the dataset for training and evaluation, create `label2id` and `id2label` dictionaries. 
## Install dependencies

Install the libraries required for model training:

```bash
!pip install transformers accelerate evaluate datasets peft -q
```

Check the versions of all required libraries to make sure you are up to date:

```python
import transformers
import accelerate
import peft

print(f"Transformers version: {transformers.__version__}")
print(f"Accelerate version: {accelerate.__version__}")
print(f"PEFT version: {peft.__version__}")
"Transformers version: 4.27.4"
"Accelerate version: 0.18.0"
"PEFT version: 0.2.0"
```

## Authenticate to share your model

To share the fine-tuned model at the end of the training with the community, authenticate using your 🤗 token. You can obtain your token from your [account settings](https://huggingface.co/settings/token).

```python
from huggingface_hub import notebook_login

notebook_login()
```

## Select a model checkpoint to fine-tune

Choose a model checkpoint from any of the model architectures supported for [image classification](https://huggingface.co/models?pipeline_tag=image-classification&sort=downloads). When in doubt, refer to the [image classification task guide](https://huggingface.co/docs/transformers/v4.27.2/en/tasks/image_classification) in 🤗 Transformers documentation.

```python
model_checkpoint = "google/vit-base-patch16-224-in21k"
```

## Load a dataset

To keep this example's runtime short, let's only load the first 5000 instances from the training set of the [Food-101 dataset](https://huggingface.co/datasets/food101):

```python
from datasets import load_dataset

dataset = load_dataset("food101", split="train[:5000]")
```

## Dataset preparation

To prepare the dataset for training and evaluation, create `label2id` and `id2label` dictionaries. These will come in handy when performing inference and for metadata information:

```python
labels = dataset.features["label"].names
label2id, id2label = dict(), dict()
for i, label in enumerate(labels):
    label2id[label] = i
    id2label[i] = label

id2label[2]
"baklava"
```

Next, load the image processor of the model you're fine-tuning:

```python
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained(model_checkpoint)
```

The `image_processor` contains useful information on the size to which training and evaluation images should be resized, as well as the values used to normalize the pixel values. Using the `image_processor`, prepare transformation functions for the datasets. These functions will include data augmentation and pixel scaling:

```python
from torchvision.transforms import (
    CenterCrop,
    Compose,
    Normalize,
    RandomHorizontalFlip,
    RandomResizedCrop,
    Resize,
    ToTensor,
)

normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
train_transforms = Compose(
    [
        RandomResizedCrop(image_processor.size["height"]),
        RandomHorizontalFlip(),
        ToTensor(),
        normalize,
    ]
)

val_transforms = Compose(
    [
        Resize(image_processor.size["height"]),
        CenterCrop(image_processor.size["height"]),
        ToTensor(),
        normalize,
    ]
)


def preprocess_train(example_batch):
    """Apply train_transforms across a batch."""
    example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]]
    return example_batch


def preprocess_val(example_batch):
    """Apply val_transforms across a batch."""
    example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]]
    return example_batch
```

Split the dataset into training and validation sets:

```python
splits = dataset.train_test_split(test_size=0.1)
train_ds = splits["train"]
val_ds = splits["test"]
```

Finally, set the transformation functions for the datasets accordingly:

```python
train_ds.set_transform(preprocess_train)
val_ds.set_transform(preprocess_val)
```

## Load and prepare a model

Before loading the model, let's define a helper function to check the total number of parameters a model has, as well as how many of them are trainable.

```python
def print_trainable_parameters(model):
    trainable_params = 0
    all_param = 0
    for _, param in model.named_parameters():
        all_param += param.numel()
        if param.requires_grad:
            trainable_params += param.numel()
    print(
        f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )
```
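To sanity-check the helper itself, you can run it on any small module first (a throwaway example, not part of the fine-tuning flow):

```python
import torch.nn as nn

print_trainable_parameters(nn.Linear(4, 2))  # 4 * 2 weights + 2 biases
"trainable params: 10 || all params: 10 || trainable%: 100.00"
```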
It's important to initialize the original model correctly as it will be used as a base to create the `PeftModel` you'll actually fine-tune. Specify the `label2id` and `id2label` so that [`~transformers.AutoModelForImageClassification`] can append a classification head to the underlying model, adapted for this dataset. You should see the following output once you run the code below:

```
Some weights of ViTForImageClassification were not initialized from the model checkpoint at google/vit-base-patch16-224-in21k and are newly initialized: ['classifier.weight', 'classifier.bias']
```

```python
from transformers import AutoModelForImageClassification, TrainingArguments, Trainer

model = AutoModelForImageClassification.from_pretrained(
    model_checkpoint,
    label2id=label2id,
    id2label=id2label,
    ignore_mismatched_sizes=True,  # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
)
```

Before creating a `PeftModel`, you can check the number of trainable parameters in the original model:

```python
print_trainable_parameters(model)
"trainable params: 85876325 || all params: 85876325 || trainable%: 100.00"
```

Next, use `get_peft_model` to wrap the base model so that "update" matrices are added to the respective places.

```python
from peft import LoraConfig, get_peft_model

config = LoraConfig(
    r=16,
    lora_alpha=16,
    target_modules=["query", "value"],
    lora_dropout=0.1,
    bias="none",
    modules_to_save=["classifier"],
)
lora_model = get_peft_model(model, config)
print_trainable_parameters(lora_model)
"trainable params: 667493 || all params: 86466149 || trainable%: 0.77"
```

Let's unpack what's going on here.

To use LoRA, you need to specify the target modules in `LoraConfig` so that `get_peft_model()` knows which modules inside our model need to be amended with LoRA matrices. In this example, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are named "query" and "value" respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`.

We also specify `modules_to_save`. After wrapping the base model with `get_peft_model()` along with the `config`, we get a new model where only the LoRA parameters (the so-called "update matrices") are trainable, while the pre-trained parameters are kept frozen. However, we also want the classifier parameters to be trained when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are trained too, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`.

Here's what the other parameters mean:

- `r`: The dimension used by the LoRA update matrices.
- `lora_alpha`: Scaling factor.
- `bias`: Specifies if the `bias` parameters should be trained. `"none"` means none of the `bias` parameters will be trained.

`r` controls how many parameters the update matrices add, and together with `lora_alpha` it sets the scale of the update, giving you the flexibility to balance a trade-off between end performance and compute efficiency.

By looking at the number of trainable parameters, you can see how many parameters we're actually training. Since the goal is to achieve parameter-efficient fine-tuning, you should expect to see fewer trainable parameters in the `lora_model` in comparison to the original model, which is indeed the case here.
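If you're curious which tensors make up those 667,493 trainable parameters, you can list them by name. A minimal sketch (in PEFT, the injected LoRA weights carry `lora_` in their parameter names, and the classifier is trainable because of `modules_to_save`):

```python
for name, param in lora_model.named_parameters():
    if param.requires_grad:
        print(name, tuple(param.shape))

print("total:", sum(p.numel() for p in lora_model.parameters() if p.requires_grad))
```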
## Define training arguments

For model fine-tuning, use [`~transformers.Trainer`]. It accepts several arguments which you can wrap using [`~transformers.TrainingArguments`].

```python
from transformers import TrainingArguments, Trainer

model_name = model_checkpoint.split("/")[-1]
batch_size = 128

args = TrainingArguments(
    f"{model_name}-finetuned-lora-food101",
    remove_unused_columns=False,
    evaluation_strategy="epoch",
    save_strategy="epoch",
    learning_rate=5e-3,
    per_device_train_batch_size=batch_size,
    gradient_accumulation_steps=4,
    per_device_eval_batch_size=batch_size,
    fp16=True,
    num_train_epochs=5,
    logging_steps=10,
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    push_to_hub=True,
    label_names=["labels"],
)
```

Compared to non-PEFT methods, you can use a larger batch size since there are fewer parameters to train. You can also set a larger learning rate than the one typically used for full fine-tuning (1e-5, for example). This can potentially also reduce the need to conduct expensive hyperparameter tuning experiments.

## Prepare evaluation metric

```python
import numpy as np
import evaluate

metric = evaluate.load("accuracy")


def compute_metrics(eval_pred):
    """Computes accuracy on a batch of predictions"""
    predictions = np.argmax(eval_pred.predictions, axis=1)
    return metric.compute(predictions=predictions, references=eval_pred.label_ids)
```

The `compute_metrics` function takes a named tuple as input: `predictions`, which are the logits of the model as Numpy arrays, and `label_ids`, which are the ground-truth labels as Numpy arrays.

## Define collation function

A collation function is used by [`~transformers.Trainer`] to gather a batch of training and evaluation examples and prepare them in a format that is acceptable by the underlying model.

```python
import torch


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["label"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
```

## Train and evaluate

Bring everything together - model, training arguments, data, collation function, etc. Then, start the training!

```python
trainer = Trainer(
    lora_model,
    args,
    train_dataset=train_ds,
    eval_dataset=val_ds,
    tokenizer=image_processor,
    compute_metrics=compute_metrics,
    data_collator=collate_fn,
)
train_results = trainer.train()
```

In just a few minutes, the fine-tuned model shows 96% validation accuracy even on this small subset of the training dataset.

```python
trainer.evaluate(val_ds)
{
    "eval_loss": 0.14475855231285095,
    "eval_accuracy": 0.96,
    "eval_runtime": 3.5725,
    "eval_samples_per_second": 139.958,
    "eval_steps_per_second": 1.12,
    "epoch": 5.0,
}
```

## Share your model and run inference

Once the fine-tuning is done, share the LoRA parameters with the community like so:

```python
repo_name = f"sayakpaul/{model_name}-finetuned-lora-food101"
lora_model.push_to_hub(repo_name)
```

When calling [`~transformers.PreTrainedModel.push_to_hub`] on the `lora_model`, only the LoRA parameters along with any modules specified in `modules_to_save` are saved. Take a look at the [trained LoRA parameters](https://huggingface.co/sayakpaul/vit-base-patch16-224-in21k-finetuned-lora-food101/blob/main/adapter_model.bin). You'll see that it's only 2.6 MB! This greatly helps with portability, especially when using a very large model to fine-tune (such as [BLOOM](https://huggingface.co/bigscience/bloom)).
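You can also verify locally that the checkpoint really only contains the adapter and classifier weights. A minimal sketch (the directory name is hypothetical; `save_pretrained` writes the weights to `adapter_model.bin`):

```python
import torch

lora_model.save_pretrained("vit-lora-food101")  # hypothetical local directory
state_dict = torch.load("vit-lora-food101/adapter_model.bin", map_location="cpu")
print(len(state_dict), "tensors,", sum(t.numel() for t in state_dict.values()), "parameters saved")
```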
Next, let's see how to load the LoRA updated parameters along with our base model for inference. When you wrap a base model with `PeftModel`, modifications are done *in-place*. To mitigate any concerns that might stem from in-place modifications, initialize the base model just like you did earlier and construct the inference model.

```python
from peft import PeftConfig, PeftModel

config = PeftConfig.from_pretrained(repo_name)
model = AutoModelForImageClassification.from_pretrained(
    config.base_model_name_or_path,
    label2id=label2id,
    id2label=id2label,
    ignore_mismatched_sizes=True,  # provide this in case you're planning to fine-tune an already fine-tuned checkpoint
)
# Load the LoRA model
inference_model = PeftModel.from_pretrained(model, repo_name)
```

Let's now fetch an example image for inference.

```python
from PIL import Image
import requests

url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg"
image = Image.open(requests.get(url, stream=True).raw)
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg" alt="image of beignets"/>
</div>

First, instantiate an `image_processor` from the underlying model repo.

```python
image_processor = AutoImageProcessor.from_pretrained(repo_name)
```

Then, prepare the example for inference.

```python
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
```

Finally, run inference!

```python
with torch.no_grad():
    outputs = inference_model(**encoding)
    logits = outputs.logits

predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", inference_model.config.id2label[predicted_class_idx])
"Predicted class: beignets"
```
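If you plan to deploy the model, you can also fold the update matrices back into the base weights so inference runs on a plain Transformers model without the PEFT wrapper. A minimal sketch using `merge_and_unload()`, which PEFT provides for LoRA models:

```python
merged_model = inference_model.merge_and_unload()

with torch.no_grad():
    logits = merged_model(**encoding).logits
predicted_class_idx = logits.argmax(-1).item()
print("Predicted class:", merged_model.config.id2label[predicted_class_idx])
```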
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/Prompt_Tuning.ipynb
import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from peft import (
    get_peft_config,
    get_peft_model,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
    PeftType,
    PrefixTuningConfig,
    PromptEncoderConfig,
    PromptTuningConfig,
)

import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from tqdm import tqdm

batch_size = 32
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = PeftType.PROMPT_TUNING
device = "cuda"
num_epochs = 20

peft_config = PromptTuningConfig(task_type="SEQ_CLS", num_virtual_tokens=10)
lr = 1e-3

if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
    padding_side = "left"
else:
    padding_side = "right"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)


def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
    return outputs


tokenized_datasets = datasets.map(
    tokenize_function,
    batched=True,
    remove_columns=["idx", "sentence1", "sentence2"],
)

# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")


def collate_fn(examples):
    return tokenizer.pad(examples, padding="longest", return_tensors="pt")


# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)

model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

optimizer = AdamW(params=model.parameters(), lr=lr)

# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
    num_training_steps=(len(train_dataloader) * num_epochs),
)

model.to(device)
for epoch in range(num_epochs):
    model.train()
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch.to(device)
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch.to(device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = predictions, batch["labels"]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    print(f"epoch {epoch}:", eval_metric)

model.push_to_hub("smangrul/roberta-large-peft-prompt-tuning", use_auth_token=True)

import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer

peft_model_id = "smangrul/roberta-large-peft-prompt-tuning"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the PEFT model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)

inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    batch.to(device)
    with torch.no_grad():
        outputs = inference_model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    predictions, references = predictions, batch["labels"]
    metric.add_batch(
        predictions=predictions,
        references=references,
    )

eval_metric = metric.compute()
print(eval_metric)
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/requirements.txt
transformers
accelerate
evaluate
tqdm
datasets
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/prefix_tuning.ipynb
import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from peft import (
    get_peft_config,
    get_peft_model,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
    PeftType,
    PrefixTuningConfig,
    PromptEncoderConfig,
)

import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from tqdm import tqdm

batch_size = 32
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = PeftType.PREFIX_TUNING
device = "cuda"
num_epochs = 20

peft_config = PrefixTuningConfig(task_type="SEQ_CLS", num_virtual_tokens=20)
lr = 1e-2

if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
    padding_side = "left"
else:
    padding_side = "right"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)


def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
    return outputs


tokenized_datasets = datasets.map(
    tokenize_function,
    batched=True,
    remove_columns=["idx", "sentence1", "sentence2"],
)

# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")


def collate_fn(examples):
    return tokenizer.pad(examples, padding="longest", return_tensors="pt")


# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)

model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

optimizer = AdamW(params=model.parameters(), lr=lr)

# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
    num_training_steps=(len(train_dataloader) * num_epochs),
)

model.to(device)
for epoch in range(num_epochs):
    model.train()
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch.to(device)
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch.to(device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = predictions, batch["labels"]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    print(f"epoch {epoch}:", eval_metric)

model.push_to_hub("smangrul/roberta-large-peft-prefix-tuning", use_auth_token=True)

import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer

peft_model_id = "smangrul/roberta-large-peft-prefix-tuning"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the PEFT model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)

inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    batch.to(device)
    with torch.no_grad():
        outputs = inference_model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    predictions, references = predictions, batch["labels"]
    metric.add_batch(
        predictions=predictions,
        references=references,
    )

eval_metric = metric.compute()
print(eval_metric)
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/LoRA.ipynb
import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from peft import (
    get_peft_config,
    get_peft_model,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
    LoraConfig,
    PeftType,
    PrefixTuningConfig,
    PromptEncoderConfig,
)

import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from tqdm import tqdm

batch_size = 32
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = PeftType.LORA
device = "cuda"
num_epochs = 20

peft_config = LoraConfig(task_type="SEQ_CLS", inference_mode=False, r=8, lora_alpha=16, lora_dropout=0.1)
lr = 3e-4

if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
    padding_side = "left"
else:
    padding_side = "right"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)


def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
    return outputs


tokenized_datasets = datasets.map(
    tokenize_function,
    batched=True,
    remove_columns=["idx", "sentence1", "sentence2"],
)

# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")


def collate_fn(examples):
    return tokenizer.pad(examples, padding="longest", return_tensors="pt")


# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)

model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

optimizer = AdamW(params=model.parameters(), lr=lr)

# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
    num_training_steps=(len(train_dataloader) * num_epochs),
)

model.to(device)
for epoch in range(num_epochs):
    model.train()
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch.to(device)
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch.to(device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = predictions, batch["labels"]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    print(f"epoch {epoch}:", eval_metric)

model.push_to_hub("smangrul/roberta-large-peft-lora", use_auth_token=True)

import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer

peft_model_id = "smangrul/roberta-large-peft-lora"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the Lora model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)

inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    batch.to(device)
    with torch.no_grad():
        outputs = inference_model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    predictions, references = predictions, batch["labels"]
    metric.add_batch(
        predictions=predictions,
        references=references,
    )

eval_metric = metric.compute()
print(eval_metric)
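# Optional sketch (not part of the original notebook): for pure inference you can
# merge the LoRA weights into the base model and drop the adapter indirection.
# merge_and_unload() is the PEFT utility for this; it returns a plain transformers model.
merged_model = inference_model.merge_and_unload()
merged_model.eval()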
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/peft_no_lora_accelerate.py
import argparse import evaluate import torch from accelerate import Accelerator, DistributedDataParallelKwargs from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import ( PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, get_peft_model, ) from peft.utils.other import fsdp_auto_wrap_policy def parse_args(): parser = argparse.ArgumentParser(description="PEFT a transformers model on a sequence classification task") parser.add_argument( "--num_virtual_tokens", type=int, default=20, help="num_virtual_tokens if the number of virtual tokens used in prompt/prefix/P tuning.", ) parser.add_argument( "--encoder_hidden_size", type=int, default=128, help="encoder_hidden_size if the encoder hidden size used in P tuninig/Prefix tuning.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=1e-3, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--peft_type", type=str, default="p_tuning", help="The PEFT type to use.", choices=["p_tuning", "prefix_tuning", "prompt_tuning"], ) args = parser.parse_args() assert args.output_dir is not None, "Need an `output_dir` to store the finetune model and verify." return args def main(): args = parse_args() ddp_scaler = DistributedDataParallelKwargs(find_unused_parameters=True) accelerator = Accelerator(kwargs_handlers=[ddp_scaler]) task = "mrpc" # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) if args.peft_type == "p_tuning": peft_config = PromptEncoderConfig( task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens, encoder_hidden_size=args.encoder_hidden_size, ) elif args.peft_type == "prefix_tuning": peft_config = PrefixTuningConfig( task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens, encoder_hidden_size=args.encoder_hidden_size, ) else: peft_config = PromptTuningConfig(task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens) tokenizer_kwargs = {} if any(k in args.model_name_or_path for k in ("gpt", "opt", "bloom")): tokenizer_kwargs["padding_side"] = "left" else: tokenizer_kwargs["padding_side"] = "right" tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, **tokenizer_kwargs) if getattr(tokenizer, "pad_token_id") is None: tokenizer.pad_token_id = tokenizer.eos_token_id datasets = load_dataset("glue", task) metric = evaluate.load("glue", task) def tokenize_function(examples): # max_length=None => use the model max length (it's actually the default) outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None) return outputs def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") with accelerator.main_process_first(): tokenized_datasets = datasets.map( tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library tokenized_datasets = tokenized_datasets.rename_column("label", "labels") # Instantiate dataloaders. train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size, ) model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() if getattr(accelerator.state, "fsdp_plugin", None) is not None: accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model) model = accelerator.prepare(model) optimizer = AdamW(params=model.parameters(), lr=args.learning_rate) # Instantiate scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=(len(train_dataloader) * args.num_train_epochs), ) if getattr(accelerator.state, "fsdp_plugin", None) is not None: train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare( train_dataloader, eval_dataloader, optimizer, lr_scheduler ) else: model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, optimizer, lr_scheduler ) for epoch in range(args.num_train_epochs): model.train() for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() samples_seen = 0 for step, batch in enumerate(tqdm(eval_dataloader)): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == 
len(eval_dataloader) - 1: predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() accelerator.print(f"epoch {epoch}:", eval_metric) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, state_dict=accelerator.get_state_dict(model)) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if __name__ == "__main__": main()
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/IA3.ipynb
import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
import peft

import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from tqdm import tqdm

batch_size = 8
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = peft.PeftType.IA3
device = "cuda"
num_epochs = 12

# peft_config = LoraConfig(task_type="SEQ_CLS", inference_mode=False, r=8, lora_alpha=16, lora_dropout=0.1)
peft_config = peft.IA3Config(task_type="SEQ_CLS", inference_mode=False)
lr = 1e-3

if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
    padding_side = "left"
else:
    padding_side = "right"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)


def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
    return outputs


tokenized_datasets = datasets.map(
    tokenize_function,
    batched=True,
    remove_columns=["idx", "sentence1", "sentence2"],
)

# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")


def collate_fn(examples):
    return tokenizer.pad(examples, padding="longest", return_tensors="pt")


# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)
test_dataloader = DataLoader(tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = peft.get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

optimizer = AdamW(params=model.parameters(), lr=lr)

# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0.06 * (len(train_dataloader) * num_epochs),
    num_training_steps=(len(train_dataloader) * num_epochs),
)

model.to(device)
for epoch in range(num_epochs):
    model.train()
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch.to(device)
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch.to(device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = predictions, batch["labels"]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    print(f"epoch {epoch}:", eval_metric)

model.push_to_hub("SumanthRH/roberta-large-peft-ia3", use_auth_token=True)

import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer

peft_model_id = "SumanthRH/roberta-large-peft-ia3"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the PEFT model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)

inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    batch.to(device)
    with torch.no_grad():
        outputs = inference_model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    predictions, references = predictions, batch["labels"]
    metric.add_batch(
        predictions=predictions,
        references=references,
    )

eval_metric = metric.compute()
print(eval_metric)
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/sequence_classification/P_Tuning.ipynb
import argparse
import os

import torch
from torch.optim import AdamW
from torch.utils.data import DataLoader
from peft import (
    get_peft_config,
    get_peft_model,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
    PeftType,
    PrefixTuningConfig,
    PromptEncoderConfig,
)

import evaluate
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from tqdm import tqdm

batch_size = 32
model_name_or_path = "roberta-large"
task = "mrpc"
peft_type = PeftType.P_TUNING
device = "cuda"
num_epochs = 20

peft_config = PromptEncoderConfig(task_type="SEQ_CLS", num_virtual_tokens=20, encoder_hidden_size=128)
lr = 1e-3

if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
    padding_side = "left"
else:
    padding_side = "right"

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
if getattr(tokenizer, "pad_token_id") is None:
    tokenizer.pad_token_id = tokenizer.eos_token_id

datasets = load_dataset("glue", task)
metric = evaluate.load("glue", task)


def tokenize_function(examples):
    # max_length=None => use the model max length (it's actually the default)
    outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
    return outputs


tokenized_datasets = datasets.map(
    tokenize_function,
    batched=True,
    remove_columns=["idx", "sentence1", "sentence2"],
)

# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")


def collate_fn(examples):
    return tokenizer.pad(examples, padding="longest", return_tensors="pt")


# Instantiate dataloaders.
train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
eval_dataloader = DataLoader(
    tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
)

model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

optimizer = AdamW(params=model.parameters(), lr=lr)

# Instantiate scheduler
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0,  # 0.06*(len(train_dataloader) * num_epochs),
    num_training_steps=(len(train_dataloader) * num_epochs),
)

model.to(device)
for epoch in range(num_epochs):
    model.train()
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch.to(device)
        outputs = model(**batch)
        loss = outputs.loss
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch.to(device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        predictions, references = predictions, batch["labels"]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    print(f"epoch {epoch}:", eval_metric)

model.push_to_hub("smangrul/roberta-large-peft-p-tuning", use_auth_token=True)

import torch
from peft import PeftModel, PeftConfig
from transformers import AutoModelForSequenceClassification, AutoTokenizer

peft_model_id = "smangrul/roberta-large-peft-p-tuning"
config = PeftConfig.from_pretrained(peft_model_id)
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)

# Load the PEFT model
inference_model = PeftModel.from_pretrained(inference_model, peft_model_id)

inference_model.to(device)
inference_model.eval()
for step, batch in enumerate(tqdm(eval_dataloader)):
    batch.to(device)
    with torch.no_grad():
        outputs = inference_model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    predictions, references = predictions, batch["labels"]
    metric.add_batch(
        predictions=predictions,
        references=references,
    )

eval_metric = metric.compute()
print(eval_metric)
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/feature_extraction/requirements.txt
git+https://github.com/huggingface/peft
git+https://github.com/huggingface/accelerate
git+https://github.com/huggingface/transformers
datasets
evaluate
hnswlib
pandas
tqdm
huggingface_hub
wandb
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/feature_extraction/peft_lora_embedding_semantic_search.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import Repository, create_repo from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from transformers.utils import get_full_repo_name from peft import LoraConfig, TaskType, get_peft_model logger = get_logger(__name__) def parse_args(): parser = argparse.ArgumentParser(description="Training a PEFT model for Sematic Search task") parser.add_argument("--dataset_name", type=str, default=None, help="dataset name on HF hub") parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--sanity_test", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--use_peft", action="store_true", help="Whether to enable experiment trackers for logging.", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def save_model_hook(models, weights, output_dir): for i, model in enumerate(models): model.save_pretrained(output_dir, state_dict=weights[i]) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): model.load_adapter(input_dir, model.active_adapter, is_trainable=True) class AutoModelForSentenceEmbedding(nn.Module): def __init__(self, model_name, tokenizer, normalize=True): super(AutoModelForSentenceEmbedding, self).__init__() self.model = AutoModel.from_pretrained(model_name) # , load_in_8bit=True, device_map={"":0}) self.normalize = normalize self.tokenizer = tokenizer def forward(self, **kwargs): model_output = self.model(**kwargs) embeddings = self.mean_pooling(model_output, kwargs["attention_mask"]) if self.normalize: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings def mean_pooling(self, model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_cosing_embeddings(query_embs, product_embs): return torch.sum(query_embs * product_embs, axis=1) def get_loss(cosine_score, labels): return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * 
cosine_score, min=0.0))) def main(): args = parse_args() accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps} if args.with_tracking: accelerator_kwargs["log_with"] = args.report_to accelerator_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(**accelerator_kwargs) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id create_repo(repo_name, exist_ok=True, token=args.hub_token) repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # get the tokenizer tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # dataset download and preprocessing if args.sanity_test: train_dataset = load_dataset("smangrul/amazon_esci", split="train[:1024]") val_dataset = load_dataset("smangrul/amazon_esci", split="validation[:1024]") dataset = DatasetDict({"train": train_dataset, "validation": val_dataset}) else: dataset = load_dataset(args.dataset_name) def preprocess_function(examples): queries = examples["query"] result = tokenizer(queries, padding="max_length", max_length=70, truncation=True) result = {f"query_{k}": v for k, v in result.items()} products = examples["product_title"] result_products = tokenizer(products, padding="max_length", max_length=70, truncation=True) for k, v in result_products.items(): result[f"product_{k}"] = v result["labels"] = examples["relevance_label"] return result processed_datasets = dataset.map( preprocess_function, batched=True, remove_columns=dataset["train"].column_names, desc="Running tokenizer on dataset", ) # Log a few random samples from the training set: for index in random.sample(range(len(processed_datasets["train"])), 3): logger.info(f"Sample {index} of the training set: {processed_datasets['train'][index]}.") # base model model = AutoModelForSentenceEmbedding(args.model_name_or_path, tokenizer) if args.use_peft: # peft config and wrapping peft_config = LoraConfig( r=8, lora_alpha=16, bias="none", task_type=TaskType.FEATURE_EXTRACTION, target_modules=["key", "query", "value"], ) model = get_peft_model(model, peft_config) model.print_trainable_parameters() accelerator.print(model) # get dataloaders train_dataloader = DataLoader( processed_datasets["train"], shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size, pin_memory=True, ) eval_dataloader = DataLoader( processed_datasets["validation"], shuffle=False, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size, 
pin_memory=True, ) optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("peft_semantic_search", experiment_config) metric = evaluate.load("roc_auc") total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps if args.use_peft: # saving and loading checkpoints for resuming training accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) logger.info("***** Running training *****") logger.info(f" Num examples = {len(processed_datasets['train'])}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): with accelerator.accumulate(model): query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k}) product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k}) loss = get_loss(get_cosing_embeddings(query_embs, product_embs), batch["labels"]) total_loss += accelerator.reduce(loss.detach().float(), reduction="sum") accelerator.backward(loss) optimizer.step() lr_scheduler.step() model.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if (step + 1) % 100 == 0: logger.info(f"Step: {step+1}, Loss: {total_loss/(step+1)}") if args.with_tracking: accelerator.log({"train/loss": total_loss / (step + 1)}, step=completed_steps) if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps }" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() for step, batch in enumerate(eval_dataloader): with torch.no_grad(): query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k}) product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k}) prediction_scores = get_cosing_embeddings(query_embs, product_embs) prediction_scores, references = accelerator.gather_for_metrics((prediction_scores, batch["labels"])) metric.add_batch( prediction_scores=prediction_scores, references=references, ) result = metric.compute() result = {f"eval/{k}": v 
for k, v in result.items()} # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", result) if args.with_tracking: result["train/epoch_loss"] = total_loss.item() / len(train_dataloader) accelerator.log(result, step=completed_steps) if args.output_dir is not None: accelerator.wait_for_everyone() if accelerator.is_main_process: if isinstance(checkpointing_steps, str): accelerator.save_state(os.path.join(args.output_dir, f"epoch_{epoch}")) accelerator.unwrap_model(model).save_pretrained( args.output_dir, state_dict=accelerator.get_state_dict(accelerator.unwrap_model(model)) ) tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: commit_message = ( f"Training in progress epoch {epoch}" if epoch < args.num_train_epochs - 1 else "End of training" ) repo.push_to_hub(commit_message=commit_message, blocking=False, auto_lfs_prune=True) accelerator.wait_for_everyone() accelerator.end_training() if __name__ == "__main__": main()
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/feature_extraction/peft_lora_embedding_semantic_similarity_inference.ipynb
import argparse import json import logging import math import os import random from pathlib import Path from tqdm import tqdm import datasets from datasets import load_dataset, DatasetDict import evaluate import torch from torch import nn from torch.utils.data import DataLoader import transformers from transformers import AutoTokenizer, AutoModel, default_data_collator, SchedulerType, get_scheduler from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from huggingface_hub import Repository, create_repo from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from peft import PeftModel import hnswlibclass AutoModelForSentenceEmbedding(nn.Module): def __init__(self, model_name, tokenizer, normalize=True): super(AutoModelForSentenceEmbedding, self).__init__() self.model = AutoModel.from_pretrained(model_name) # , load_in_8bit=True, device_map={"":0}) self.normalize = normalize self.tokenizer = tokenizer def forward(self, **kwargs): model_output = self.model(**kwargs) embeddings = self.mean_pooling(model_output, kwargs["attention_mask"]) if self.normalize: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings def mean_pooling(self, model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_cosing_embeddings(query_embs, product_embs): return torch.sum(query_embs * product_embs, axis=1)model_name_or_path = "intfloat/e5-large-v2" peft_model_id = "smangrul/peft_lora_e5_semantic_search" dataset_name = "smangrul/amazon_esci" max_length = 70 batch_size = 256import pandas as pd tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) dataset = load_dataset(dataset_name) train_product_dataset = dataset["train"].to_pandas()[["product_title"]] val_product_dataset = dataset["validation"].to_pandas()[["product_title"]] product_dataset_for_indexing = pd.concat([train_product_dataset, val_product_dataset]) product_dataset_for_indexing = product_dataset_for_indexing.drop_duplicates() product_dataset_for_indexing.reset_index(drop=True, inplace=True) product_dataset_for_indexing.reset_index(inplace=True)product_dataset_for_indexingpd.set_option("max_colwidth", 300) product_dataset_for_indexing.sample(10)from datasets import Dataset dataset = Dataset.from_pandas(product_dataset_for_indexing) def preprocess_function(examples): products = examples["product_title"] result = tokenizer(products, padding="max_length", max_length=70, truncation=True) return result processed_dataset = dataset.map( preprocess_function, batched=True, remove_columns=dataset.column_names, desc="Running tokenizer on dataset", ) processed_dataset# base model model = AutoModelForSentenceEmbedding(model_name_or_path, tokenizer) # peft config and wrapping model = PeftModel.from_pretrained(model, peft_model_id) print(model)dataloader = DataLoader( processed_dataset, shuffle=False, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True, 
)next(iter(dataloader))ids_to_products_dict = {i: p for i, p in zip(dataset["index"], dataset["product_title"])} ids_to_products_dictdevice = "cuda" model.to(device) model.eval() model = model.merge_and_unload()import numpy as np num_products = len(dataset) d = 1024 product_embeddings_array = np.zeros((num_products, d)) for step, batch in enumerate(tqdm(dataloader)): with torch.no_grad(): with torch.amp.autocast(dtype=torch.bfloat16, device_type="cuda"): product_embs = model(**{k: v.to(device) for k, v in batch.items()}).detach().float().cpu() start_index = step * batch_size end_index = start_index + batch_size if (start_index + batch_size) < num_products else num_products product_embeddings_array[start_index:end_index] = product_embs del product_embs, batchdef construct_search_index(dim, num_elements, data): # Declaring index search_index = hnswlib.Index(space="ip", dim=dim) # possible options are l2, cosine or ip # Initializing index - the maximum number of elements should be known beforehand search_index.init_index(max_elements=num_elements, ef_construction=200, M=100) # Element insertion (can be called several times): ids = np.arange(num_elements) search_index.add_items(data, ids) return search_indexproduct_search_index = construct_search_index(d, num_products, product_embeddings_array)def get_query_embeddings(query, model, tokenizer, device): inputs = tokenizer(query, padding="max_length", max_length=70, truncation=True, return_tensors="pt") model.eval() with torch.no_grad(): query_embs = model(**{k: v.to(device) for k, v in inputs.items()}).detach().cpu() return query_embs[0] def get_nearest_neighbours(k, search_index, query_embeddings, ids_to_products_dict, threshold=0.7): # Controlling the recall by setting ef: search_index.set_ef(100) # ef should always be > k # Query dataset, k - number of the closest elements (returns 2 numpy arrays) labels, distances = search_index.knn_query(query_embeddings, k=k) return [ (ids_to_products_dict[label], (1 - distance)) for label, distance in zip(labels[0], distances[0]) if (1 - distance) >= threshold ]query = "NLP and ML books" k = 10 query_embeddings = get_query_embeddings(query, model, tokenizer, device) search_results = get_nearest_neighbours(k, product_search_index, query_embeddings, ids_to_products_dict, threshold=0.7) print(f"{query=}") for product, cosine_sim_score in search_results: print(f"cosine_sim_score={round(cosine_sim_score,2)} {product=}")
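Since the embedding wrapper was constructed with `normalize=True`, the product embeddings should be unit-length, which is why the hnswlib inner-product distance can be converted back to a cosine similarity as `1 - distance` in `get_nearest_neighbours`. A quick sanity check, as a sketch over the array built above:

```python
import numpy as np

# All rows should have L2 norm ~1.0 if normalization was applied.
norms = np.linalg.norm(product_embeddings_array, axis=1)
print(norms.min(), norms.max())
```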
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/accelerate_ds_zero3_cpu_offload_config.yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
mixed_precision: 'no'
num_machines: 1
num_processes: 1
rdzv_backend: static
same_network: true
use_cpu: false
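To use this config, pass it to `accelerate launch` together with the training script from this folder, for example:

```sh
accelerate launch \
    --config_file accelerate_ds_zero3_cpu_offload_config.yaml \
    peft_lora_clm_accelerate_ds_zero3_offload.py
```

Note that despite the file name, `offload_optimizer_device` and `offload_param_device` are set to `none` here; change them to `cpu` to actually enable ZeRO-3 CPU offloading.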
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/peft_lora_clm_accelerate_big_model_inference.ipynb
from transformers import AutoModelForCausalLM from peft import PeftModel, PeftConfig import torch from datasets import load_dataset import os from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset device = "cuda" model_name_or_path = "bigscience/bloomz-7b1" tokenizer_name_or_path = "bigscience/bloomz-7b1" dataset_name = "twitter_complaints" text_column = "Tweet text" label_column = "text_label" max_length = 64 lr = 1e-3 num_epochs = 50 batch_size = 8from datasets import load_dataset dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] print(classes) dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) print(dataset) dataset["train"][0]# data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) def preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] # print(i, sample_input_ids, label_input_ids) model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True )def test_preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] model_inputs = tokenizer(inputs) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids 
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) return model_inputs processed_datasets = dataset.map( test_preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) eval_dataset = processed_datasets["train"] test_dataset = processed_datasets["test"] eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) print(next(iter(eval_dataloader))) print(next(iter(test_dataloader)))from peft import PeftModel, PeftConfig max_memory = {0: "1GIB", 1: "1GIB", 2: "2GIB", 3: "10GIB", "cpu": "30GB"} peft_model_id = "smangrul/twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, device_map="auto", max_memory=max_memory) model = PeftModel.from_pretrained(model, peft_model_id, device_map="auto", max_memory=max_memory)# modelmodel.hf_device_mapmodel.eval() i = 89 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))model.eval() eval_preds = [] for _, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = model.generate(**batch, max_new_tokens=10) preds = outputs[:, max_length:].detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["train"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=}") print(f"{eval_preds[:10]=}") print(f"{dataset['train'][label_column][:10]=}")model.eval() test_preds = [] for _, batch in enumerate(tqdm(test_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = model.generate(**batch, max_new_tokens=10) preds = outputs[:, max_length:].detach().cpu().numpy() test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) if len(test_preds) > 100: break test_preds
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/requirements.txt
transformers
accelerate
evaluate
deepspeed
tqdm
datasets
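Install the pinned dependencies with pip; the notebooks and scripts in this folder also import `peft` and `torch`, so make sure those are available as well:

```sh
pip install -r requirements.txt
pip install peft torch
```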
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/peft_prefix_tuning_clm.ipynb
from transformers import AutoModelForCausalLM from peft import get_peft_config, get_peft_model, PrefixTuningConfig, TaskType, PeftType import torch from datasets import load_dataset import os from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset device = "cuda" model_name_or_path = "bigscience/bloomz-560m" tokenizer_name_or_path = "bigscience/bloomz-560m" peft_config = PrefixTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=30) dataset_name = "twitter_complaints" checkpoint_name = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}_v1.pt".replace( "/", "_" ) text_column = "Tweet text" label_column = "text_label" max_length = 64 lr = 3e-2 num_epochs = 50 batch_size = 8from datasets import load_dataset dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] print(classes) dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) print(dataset) dataset["train"][0]# data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) def preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] # print(i, sample_input_ids, label_input_ids) model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["train"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, 
pin_memory=True)def test_preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] model_inputs = tokenizer(inputs) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) return model_inputs test_dataset = dataset["test"].map( test_preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) next(iter(test_dataloader))next(iter(train_dataloader))len(test_dataloader)next(iter(test_dataloader))# creating model model = AutoModelForCausalLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters()model.print_trainable_parameters()modelmodel.peft_config# model # optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), )# training and evaluation model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} # print(batch) # print(batch["input_ids"].shape) outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")model.eval() i = 16 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3 ) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))# saving model peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) model.save_pretrained(peft_model_id)ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckptfrom peft import PeftModel, PeftConfig peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( 
"/", "_" ) config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id)model.to(device) model.eval() i = 4 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3 ) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/peft_prompt_tuning_clm.ipynb
from transformers import AutoModelForCausalLM from peft import get_peft_config, get_peft_model, PromptTuningInit, PromptTuningConfig, TaskType, PeftType import torch from datasets import load_dataset import os from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset device = "cuda" model_name_or_path = "bigscience/bloomz-560m" tokenizer_name_or_path = "bigscience/bloomz-560m" peft_config = PromptTuningConfig( task_type=TaskType.CAUSAL_LM, prompt_tuning_init=PromptTuningInit.TEXT, num_virtual_tokens=8, prompt_tuning_init_text="Classify if the tweet is a complaint or not:", tokenizer_name_or_path=model_name_or_path, ) dataset_name = "twitter_complaints" checkpoint_name = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}_v1.pt".replace( "/", "_" ) text_column = "Tweet text" label_column = "text_label" max_length = 64 lr = 3e-2 num_epochs = 50 batch_size = 8from datasets import load_dataset dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] print(classes) dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) print(dataset) dataset["train"][0]# data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) if tokenizer.pad_token_id is None: tokenizer.pad_token_id = tokenizer.eos_token_id target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) print(target_max_length) def preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] # print(i, sample_input_ids, label_input_ids) model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["train"] train_dataloader = DataLoader( train_dataset, 
shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)def test_preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] model_inputs = tokenizer(inputs) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) return model_inputs test_dataset = dataset["test"].map( test_preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) test_dataloader = DataLoader(test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) next(iter(test_dataloader))next(iter(train_dataloader))len(test_dataloader)next(iter(test_dataloader))# creating model model = AutoModelForCausalLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters()# model # optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), )# training and evaluation model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} # print(batch) # print(batch["input_ids"].shape) outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")model.eval() i = 33 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3 ) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))# saving model peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) model.save_pretrained(peft_model_id)ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckptfrom peft import 
PeftModel, PeftConfig peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id)model.to(device) model.eval() i = 4 inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt") print(dataset["test"][i]["Tweet text"]) print(inputs) with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3 ) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
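With `PromptTuningInit.TEXT`, PEFT initializes the virtual-token embeddings from the tokenized init text, repeating or truncating the resulting token sequence to match `num_virtual_tokens`. A quick sketch to check how many tokens the init text actually produces:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
init_ids = tok("Classify if the tweet is a complaint or not:")["input_ids"]
print(len(init_ids))  # compare against num_virtual_tokens=8 above
```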
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/peft_lora_clm_accelerate_ds_zero3_offload.py
import gc import os import sys import threading import numpy as np import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup, set_seed, ) from peft import LoraConfig, TaskType, get_peft_model def levenshtein_distance(str1, str2): # TC: O(N^2) # SC: O(N^2) if str1 == str2: return 0 num_rows = len(str1) + 1 num_cols = len(str2) + 1 dp_matrix = np.empty((num_rows, num_cols)) dp_matrix[0, :] = range(num_cols) dp_matrix[:, 0] = range(num_rows) for i in range(1, num_rows): for j in range(1, num_cols): if str1[i - 1] == str2[j - 1]: dp_matrix[i, j] = dp_matrix[i - 1, j - 1] else: dp_matrix[i, j] = min(dp_matrix[i - 1, j - 1], dp_matrix[i - 1, j], dp_matrix[i, j - 1]) + 1 return dp_matrix[num_rows - 1, num_cols - 1] def get_closest_label(eval_pred, classes): min_id = sys.maxsize min_edit_distance = sys.maxsize for i, class_label in enumerate(classes): edit_distance = levenshtein_distance(eval_pred.strip(), class_label) if edit_distance < min_edit_distance: min_id = i min_edit_distance = edit_distance return classes[min_id] # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def main(): accelerator = Accelerator() model_name_or_path = "bigscience/bloomz-7b1" dataset_name = "twitter_complaints" peft_config = LoraConfig(task_type=TaskType.CAUSAL_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1) text_column = "Tweet text" label_column = "text_label" lr = 3e-3 num_epochs = 20 batch_size = 8 seed = 42 max_length = 64 do_test = False set_seed(seed) dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] targets = [str(x) for x 
in examples[label_column]] model_inputs = tokenizer(inputs) labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length]) model_inputs["labels"] = labels["input_ids"] return model_inputs def test_preprocess_function(examples): batch_size = len(examples[text_column]) inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]] model_inputs = tokenizer(inputs) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length]) model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length]) return model_inputs with accelerator.main_process_first(): processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=True, desc="Running tokenizer on dataset", ) accelerator.wait_for_everyone() train_dataset = processed_datasets["train"] with accelerator.main_process_first(): processed_datasets = dataset.map( test_preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) eval_dataset = processed_datasets["train"] test_dataset = processed_datasets["test"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) test_dataloader = DataLoader( test_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) print(next(iter(train_dataloader))) # creating model model = AutoModelForCausalLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=lr) # lr scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare( 
model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler ) accelerator.print(model) is_ds_zero_3 = False if getattr(accelerator.state, "deepspeed_plugin", None): is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3 for epoch in range(num_epochs): with TorchTracemalloc() as tracemalloc: model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print("CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used)) accelerator.print("CPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.cpu_peaked)) accelerator.print( "CPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=}") model.eval() eval_preds = [] with TorchTracemalloc() as tracemalloc: for _, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = accelerator.unwrap_model(model).generate( **batch, synced_gpus=is_ds_zero_3, max_new_tokens=10 ) # synced_gpus=True for DS-stage 3 outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id) preds = accelerator.gather_for_metrics(outputs) preds = preds[:, max_length:].detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("GPU Memory before entering the eval : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the eval (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the eval : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print("CPU Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.cpu_used)) accelerator.print("CPU Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.cpu_peaked)) accelerator.print( "CPU Total Peak Memory consumed during the eval (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) correct = 0 total = 0 assert len(eval_preds) == len( dataset["train"][label_column] ), f"{len(eval_preds)} != {len(dataset['train'][label_column])}" for pred, true in zip(eval_preds, 
dataset["train"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 accelerator.print(f"{accuracy=}") accelerator.print(f"{eval_preds[:10]=}") accelerator.print(f"{dataset['train'][label_column][:10]=}") if do_test: model.eval() test_preds = [] for _, batch in enumerate(tqdm(test_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = accelerator.unwrap_model(model).generate( **batch, synced_gpus=is_ds_zero_3, max_new_tokens=10 ) # synced_gpus=True for DS-stage 3 outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id) preds = accelerator.gather(outputs) preds = preds[:, max_length:].detach().cpu().numpy() test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) test_preds_cleaned = [] for _, pred in enumerate(test_preds): test_preds_cleaned.append(get_closest_label(pred, classes)) test_df = dataset["test"].to_pandas() assert len(test_preds_cleaned) == len(test_df), f"{len(test_preds_cleaned)} != {len(test_df)}" test_df[label_column] = test_preds_cleaned test_df["text_labels_orig"] = test_preds accelerator.print(test_df[[text_column, label_column]].sample(20)) pred_df = test_df[["ID", label_column]] pred_df.columns = ["ID", "Label"] os.makedirs(f"data/{dataset_name}", exist_ok=True) pred_df.to_csv(f"data/{dataset_name}/predictions.csv", index=False) accelerator.wait_for_everyone() # Option1: Pushing the model to Hugging Face Hub # model.push_to_hub( # f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), # token = "hf_..." # ) # token (`bool` or `str`, *optional*): # `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated # when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` # is not specified. # Or you can get your token from https://huggingface.co/settings/token # Option2: Saving the model locally peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) model.save_pretrained(peft_model_id) accelerator.wait_for_everyone() if __name__ == "__main__": main()
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb
import os os.environ["CUDA_VISIBLE_DEVICES"] = "3" os.environ["WANDB_PROJECT"] = "PeftExamples" import transformers from peft import ( LoraConfig, PeftConfig, PeftModel, get_peft_model, prepare_model_for_int8_training, ) from transformers import ( AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, TrainingArguments, Trainer, default_data_collator, ) import torch from dataclasses import dataclass, field from typing import Optional from dataclass_csv import DataclassReader from torch.utils.data import Dataset, DataLoader from enum import Enumclass SpecialTokens(str, Enum): begin_target = "<|begintarget|>" end_target = "<|endtarget|>" begin_context = "<|begincontext|>" end_context = "<|endcontext|>" system = "<|system|>" user = "<|user|>" begin_last_user_utterance = "<|beginlastuserutterance|>" end_last_user_utterance = "<|endlastuserutterance|>" begin_dsts = "<|begindsts|>" end_dsts = "<|enddsts|>" begin_dst = "<|begindst|>" end_dst = "<|enddst|>" begin_belief = "<|beginbelief|>" end_belief = "<|endbelief|>" begin_response = "<|beginresponse|>" end_response = "<|endresponse|>" begin_action = "<|beginaction|>" end_action = "<|endaction|>" begin_user_action = "<|beginuseraction|>" end_user_action = "<|enduseraction|>" sys_actions = "<|sysactions|>" begin_intent = "<|beginintent|>" end_intent = "<|endintent|>" begin_requested_slots = "<|beginrequestedslots|>" end_requested_slots = "<|endrequestedslots|>" pad_token = "<|pad|>" bos_token = "<|startoftext|>" @classmethod def list(cls): return [c.value for c in cls]model_name = "mistralai/Mistral-7B-v0.1" tokenizer = AutoTokenizer.from_pretrained( model_name, pad_token=SpecialTokens.pad_token.value, bos_token=SpecialTokens.bos_token.value, eos_token=SpecialTokens.end_target.value, additional_special_tokens=SpecialTokens.list(), ) model = AutoModelForCausalLM.from_pretrained( model_name, low_cpu_mem_usage=True # use_flash_attention_2=True, # leading to an error ) model.resize_token_embeddings(len(tokenizer))config = LoraConfig( r=64, lora_alpha=128, lora_dropout=0.0, target_modules=["embed_tokens", "lm_head", "q_proj", "v_proj"] ) model = get_peft_model(model, config) print(model.print_trainable_parameters()) print(model)from datasets import load_dataset dataset = load_dataset("smangrul/assistant_chatbot_dataset") dataset = dataset["train"].train_test_split(0.2) text_column = "context" label_column = "target" max_length = 512 def preprocess_function(examples): batch_size = len(examples[text_column]) targets = [str(x) for x in examples[label_column]] model_inputs = tokenizer(examples[text_column]) labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id] # print(i, sample_input_ids, label_input_ids) model_inputs["input_ids"][i] = sample_input_ids + label_input_ids labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i]) # print(model_inputs) for i in range(batch_size): sample_input_ids = model_inputs["input_ids"][i] label_input_ids = labels["input_ids"][i] model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * ( max_length - len(sample_input_ids) ) + sample_input_ids model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[ "attention_mask" ][i] labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + 
label_input_ids model_inputs["input_ids"][i] = model_inputs["input_ids"][i][:max_length] model_inputs["attention_mask"][i] = model_inputs["attention_mask"][i][:max_length] labels["input_ids"][i] = labels["input_ids"][i][:max_length] model_inputs["labels"] = labels["input_ids"] return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"]train_datasettrain_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=8, pin_memory=True )next(iter(train_dataloader))tokenizer.decode(train_dataset[0]["input_ids"])training_args = TrainingArguments( output_dir="mistral_lora_clm_with_added_tokens", num_train_epochs=2, save_total_limit=5, per_device_train_batch_size=8, warmup_steps=10, weight_decay=0.0001, dataloader_drop_last=True, bf16=True, logging_steps=10, learning_rate=1e-5, gradient_checkpointing=True, gradient_checkpointing_kwargs={"use_reentrant": False}, remove_unused_columns=False, hub_model_id="smangrul/mistral_lora_clm_with_added_tokens", push_to_hub=True, hub_private_repo=True, ) trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, data_collator=default_data_collator, ) # model.config.use_cache = False trainer.train()import random i = random.randint(0, len(dataset["test"])) context = dataset["test"][i]["context"] batch = tokenizer(context, return_tensors="pt") batch = {k: v.to("cuda") for k, v in batch.items()} model.eval() output_tokens = model.generate( **batch, max_new_tokens=256, do_sample=True, temperature=0.2, top_p=0.95, top_k=50, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, ) target_predicted = tokenizer.decode(output_tokens[0], skip_special_tokens=False).split("<|endcontext|>")[1] target = dataset["test"][i]["target"] print(f"{context=} \n\n {target_predicted=} \n\n {target=}")trainer.push_to_hub() trainer.model.push_to_hub(training_args.output_dir)from peft import PeftModel inference_model = AutoModelForCausalLM.from_pretrained( model_name, low_cpu_mem_usage=True, # use_flash_attention_2=True, ) inference_model.resize_token_embeddings(len(tokenizer)) inference_model = PeftModel.from_pretrained(inference_model, "smangrul/mistral_lora_clm_with_added_tokens") inference_model.to("cuda") inference_model.eval() output_tokens = inference_model.generate( **batch, max_new_tokens=256, do_sample=True, temperature=0.2, top_p=0.95, top_k=50, eos_token_id=tokenizer.eos_token_id, pad_token_id=tokenizer.pad_token_id, ) target_predicted = tokenizer.decode(output_tokens[0], skip_special_tokens=False).split("<|endcontext|>")[1] print(f"{context=} \n\n {target_predicted=} \n\n {target=}")
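Because `resize_token_embeddings` adds freshly initialized rows for the new special tokens, `embed_tokens` and `lm_head` are included in `target_modules` so that LoRA can adapt them along with the attention projections. A small sketch to verify that those modules were wrapped by PEFT:

```python
# Expect LoRA-wrapped module types for the embedding and LM head.
for name, module in model.named_modules():
    if name.endswith("embed_tokens") or name.endswith("lm_head"):
        print(name, type(module).__name__)
```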
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/loftq_finetuning/README.md
# LoftQ: LoRA-fine-tuning-aware Quantization

## Introduction

LoftQ provides better initialization for the LoRA adapters A and B, together with the quantization of the pre-trained weights W.

## Quantization

We recommend saving the quantized backbone model in fp16/fp32 and loading it as [NormalFloat4](https://arxiv.org/abs/2305.14314).

We provide a simple example showing how to quantize the llama-2-7b model and save/load it.

```sh
python quantize_save_load.py \
    --model_name_or_path meta-llama/Llama-2-7b-hf \
    --token HF_TOKEN \
    --bits 4 --iter 5 --rank 16 \
    --save_dir model_zoo/loftq/
```

- `HF_TOKEN` is the token used to access the [Llama models](https://huggingface.co/meta-llama).
- The `quantize_and_save()` function quantizes the backbone and initializes the LoRA adapters. It creates 2 folders under `$save_dir`: the quantized backbone is at `Llama-2-7b-hf-4bit-16rank`, and the LoRA adapters are in the sub-folder `Llama-2-7b-hf-4bit-16rank/loftq_init`.

## Fine-tuning

Here is an example of loading the quantized backbone and LoRA adapters:

```python
import os

from transformers import AutoModelForCausalLM
from peft import PeftModel


base_model = AutoModelForCausalLM.from_pretrained(
    os.path.join(args.save_dir, "Llama-2-7b-hf-4bit-16rank"),
    load_in_4bit=True,
)
peft_model = PeftModel.from_pretrained(
    base_model,
    os.path.join(args.save_dir, "Llama-2-7b-hf-4bit-16rank", "loftq_init"),
    is_trainable=True,
)
```

We also provide an example of fine-tuning LoftQ on GSM8K. We load the quantized backbone and LoRA adapters from the [LoftQ Huggingface hub](https://huggingface.co/LoftQ).

```sh
python train_gsm8k_llama.py \
    --model_name_or_path LoftQ/Llama-2-7b-hf-4bit-64rank \
    --output_dir exp_results/gsm8k/llama-2-7b/bit4-rank64/lr3e-4 \
    --learning_rate 3e-4 \
    --seed 202 \
    --dataset_name gsm8k \
    --dataset_config main \
    --pad_to_max_length \
    --max_source_length 128 \
    --max_target_length 256 \
    --num_train_epochs 5 \
    --per_device_train_batch_size 4 \
    --per_device_eval_batch_size 4 \
    --gradient_accumulation_steps 4 \
    --with_tracking \
    --report_to tensorboard
```
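The 4-bit load can equivalently be expressed with a `BitsAndBytesConfig` (the same class the GSM8K training script in this folder imports). A sketch with NF4 settings commonly paired with LoftQ checkpoints, mirroring the snippet above (so `args.save_dir` is assumed to be defined, and bf16-capable hardware is an assumption):

```python
import os

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import PeftModel

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # NormalFloat4, as recommended above
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumption: bf16-capable hardware
)
base_model = AutoModelForCausalLM.from_pretrained(
    os.path.join(args.save_dir, "Llama-2-7b-hf-4bit-16rank"),
    quantization_config=bnb_config,
)
peft_model = PeftModel.from_pretrained(
    base_model,
    os.path.join(args.save_dir, "Llama-2-7b-hf-4bit-16rank", "loftq_init"),
    is_trainable=True,
)
```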
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/loftq_finetuning/train_gsm8k_llama.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import copy import logging import math import os import random import re from pathlib import Path import datasets import torch import transformers from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import Repository, create_repo from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import send_example_telemetry from transformers.utils.versions import require_version from peft import PeftModel # Will error if the minimal version of Transformers is not installed. Remove at your own risks. # check_min_version("4.32.0.dev0") logger = get_logger(__name__) require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt") MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv, txt or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv, txt or a json file containing the validation data." 
) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--ignore_pad_token_for_loss", type=bool, default=True, help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.", ) parser.add_argument( "--max_source_length", type=int, default=128, help=( "The maximum total input sequence length after " "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", type=int, default=128, help=( "The maximum total sequence length for target text after " "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded." "during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. 
Otherwise, dynamic padding is used.", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", type=bool, default=False, help=( "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option" "should only be set to `True` for repositories you trust and in which you have read the code, as it will" "execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--low_cpu_mem_usage", action="store_true", help=( "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded." "If passed, LLM loading time and RAM consumption will be benefited." ), ) ########################## # Generation Config # ########################## parser.add_argument( "--temperature", type=float, default=0.8, help="temperature of 1.0 has no effect, lower tend toward greedy sampling", ) parser.add_argument("--k", type=int, default=40, help="Choose k candidate words") parser.add_argument("--p", type=float, default=0.95, help="The sum of probability of candidate words is 0.9 ") ########################## # Exp Args # ########################## parser.add_argument( "--adapter_name_or_path", type=str, default=None, help=( "The LoRA adapter checkpoint. Set None if you want to fine-tune from LoftQ." "Specify a path if you want to evaluate." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." 
return args


def main():
    args = parse_args()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_clm_no_trainer", args)

    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
    # in the environment
    accelerator_log_kwargs = {}

    if args.with_tracking:
        accelerator_log_kwargs["log_with"] = args.report_to
        accelerator_log_kwargs["project_dir"] = args.output_dir

    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.push_to_hub:
            # Retrieve or infer repo_name
            repo_name = args.hub_model_id
            if repo_name is None:
                repo_name = Path(args.output_dir).absolute().name
            # Create repo and retrieve repo_id
            repo_id = create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id
            # Clone repo locally
            repo = Repository(args.output_dir, clone_from=repo_id, token=args.hub_token)

            with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
                if "step_*" not in gitignore:
                    gitignore.write("step_*\n")
                if "epoch_*" not in gitignore:
                    gitignore.write("epoch_*\n")
        elif args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
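        # For example, with the default `--validation_split_percentage 5`, the slice expressions
        # below evaluate to "train[:5%]" and "train[5%:]": the first 5% of the train split is held
        # out for validation and the remaining 95% is used for training.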
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[:{args.validation_split_percentage}%]", ) raw_datasets["train"] = load_dataset( args.dataset_name, args.dataset_config_name, split=f"train[{args.validation_split_percentage}%:]", ) else: data_files = {} dataset_args = {} if args.train_file is not None: data_files["train"] = args.train_file if args.validation_file is not None: data_files["validation"] = args.validation_file extension = args.train_file.split(".")[-1] if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{args.validation_split_percentage}%]", **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{args.validation_split_percentage}%:]", **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if args.config_name: config = AutoConfig.from_pretrained( args.config_name, trust_remote_code=args.trust_remote_code, ) elif args.model_name_or_path: config = AutoConfig.from_pretrained( args.model_name_or_path, trust_remote_code=args.trust_remote_code, ) else: config = CONFIG_MAPPING[args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code ) elif args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.model_name_or_path, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code, ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) ########################## # Tokenizer # ########################## tokenizer.pad_token_id = 0 # unk. 
we want this to be different from the eos token tokenizer.padding_side = "left" # Allow batched inference tokenizer.truncation_side = "left" if args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, low_cpu_mem_usage=True, quantization_config=BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=False, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=config.torch_dtype, ), ) else: logger.info("Training new model from scratch") model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code) ########################## # Peft Model # ########################## if args.adapter_name_or_path is None: model = PeftModel.from_pretrained(model, args.model_name_or_path, subfolder="loftq_init", is_trainable=True) else: model = PeftModel.from_pretrained(model, args.adapter_name_or_path, is_trainable=True) model.print_trainable_parameters() # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embedding_size = model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. ########################## # GSM8K dataset # ########################## # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names # Get the column names for source/target. source_column, target_column = "question", "answer" # Temporarily set max_target_length for training. padding = "max_length" if args.pad_to_max_length else False task_prompt = "\nAnswer the above question. First think step by step and then answer the final number.\n" def prompt_process(sent_1, sent_2, prompt_1="", prompt_2="", prompt_3=""): sent_2 = sent_2.replace("####", "The final answer is") return prompt_1 + sent_1 + prompt_2 + sent_2 + prompt_3 def preprocess_function_train(examples): sources = examples[source_column] targets = examples[target_column] inputs = [prompt_process(source, target, prompt_2=task_prompt) for (source, target) in zip(sources, targets)] model_inputs = tokenizer( inputs, max_length=args.max_source_length + args.max_target_length, padding=padding, truncation=True, return_tensors="pt", ) labels = copy.deepcopy(model_inputs) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and args.ignore_pad_token_for_loss: # get the length of the target tokens. 
-1 to kick out the <BOS> token target_tokens = tokenizer(targets, padding=False) target_len = [len(label) - 1 for label in target_tokens["input_ids"]] # don't calculate the loss from source and padding (left padding) for i in range(len(labels["input_ids"])): labels["input_ids"][i, : -target_len[i]] = -100 model_inputs["labels"] = labels["input_ids"] return model_inputs def preprocess_function_test(examples): sources = examples[source_column] labels = examples[target_column] inputs = [source + task_prompt for source in sources] model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True) labels = tokenizer(labels, max_length=args.max_target_length, padding=padding, truncation=True) model_inputs["labels"] = labels["input_ids"] return model_inputs with accelerator.main_process_first(): train_dataset = raw_datasets["train"].map( preprocess_function_train, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on training dataset", ) eval_dataset = raw_datasets["test"].map( preprocess_function_test, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on test dataset", ) # Log a few random samples from the set: for index in random.sample(range(len(train_dataset)), 2): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") for index in random.sample(range(len(eval_dataset)), 2): logger.info(f"Sample {index} of the validation set: {eval_dataset[index]}.") # DataLoaders creation: train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "layer_norm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and "lora" in n], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties. if accelerator.distributed_type == DistributedType.TPU: model.tie_weights() # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
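    # (`accelerator.prepare` shards the dataloader across processes, so `len(train_dataloader)`
    # here is roughly the pre-`prepare` length divided by the number of processes.)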
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # Figure out how many steps we should save the Accelerator states
    checkpointing_steps = args.checkpointing_steps
    if checkpointing_steps is not None and checkpointing_steps.isdigit():
        checkpointing_steps = int(checkpointing_steps)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if args.with_tracking:
        experiment_config = vars(args)
        # TensorBoard cannot log Enums, need the raw value
        experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
        accelerator.init_trackers("clm_no_trainer", experiment_config)

    # Train!
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            checkpoint_path = args.resume_from_checkpoint
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
            checkpoint_path = path
            path = os.path.basename(checkpoint_path)

        accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        # Load the full checkpoint path, not just its basename, so resuming works from any working directory.
        accelerator.load_state(checkpoint_path)
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
            completed_steps = starting_epoch * num_update_steps_per_epoch
        else:
            # need to multiply `gradient_accumulation_steps` to reflect real steps
            resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
            completed_steps = resume_step // args.gradient_accumulation_steps

    # update the progress bar if loading from a checkpoint
    progress_bar.update(completed_steps)

    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We skip the first `n` batches in the dataloader when resuming from a checkpoint
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
        else:
            active_dataloader = train_dataloader
        for step, batch in
enumerate(active_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) accelerator.print(f"Epoch: {epoch} | Step: {step} | Loss: {loss}") optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if completed_steps >= args.max_train_steps: break model.eval() gen_kwargs = { "max_new_tokens": args.max_target_length, "temperature": args.temperature, "top_k": args.k, "top_p": args.p, "do_sample": True, } ans_pred_list = [] ans_gold_list = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): gen_kwargs["input_ids"] = batch["input_ids"] gen_kwargs["attention_mask"] = batch["attention_mask"] generated_tokens = accelerator.unwrap_model(model).generate(**gen_kwargs) pred_tokens = generated_tokens[:, args.max_source_length :] pred_tokens = accelerator.pad_across_processes(pred_tokens, dim=1, pad_index=tokenizer.pad_token_id) gold_tokens = batch["labels"] if not args.pad_to_max_length: # If we did not pad to max length, we need to pad the labels too gold_tokens = accelerator.pad_across_processes( batch["labels"], dim=1, pad_index=tokenizer.pad_token_id ) pred_tokens, gold_tokens = accelerator.gather_for_metrics((pred_tokens, gold_tokens)) pred_tokens, gold_tokens = pred_tokens.cpu().numpy(), gold_tokens.cpu().numpy() if isinstance(pred_tokens, tuple): pred_tokens = pred_tokens[0] decoded_pred = tokenizer.batch_decode(pred_tokens, skip_special_tokens=True) decoded_gold = tokenizer.batch_decode(gold_tokens, skip_special_tokens=True) # Extract the numbers in sentences accelerator.print(decoded_pred) ans_pred_list += [extract_answer_number(sentence_pred) for sentence_pred in decoded_pred] ans_gold_list += [extract_answer_number(sentence_gold) for sentence_gold in decoded_gold] accelerator.print(ans_pred_list) accelerator.print(ans_gold_list) accuracy = compute_accuracy(ans_gold_list, ans_pred_list) logger.info(f"epoch {epoch}: accuracy: {accuracy}") if args.with_tracking: accelerator.log( { "accuracy": accuracy, "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, 
save_function=accelerator.save
        )
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)


PATTERN_NUMBER = re.compile(r"-?\d+\.?\d*")


def extract_answer_number(sentence: str) -> float:
    sentence = sentence.replace(",", "")
    pred = PATTERN_NUMBER.findall(sentence)
    if not pred:
        return float("inf")
    segment = sentence.split("The final answer is ")
    if len(segment) > 1:
        pred_answer = segment[1]
        pred_answer = PATTERN_NUMBER.findall(pred_answer)
        if len(pred_answer) > 0:
            pred_answer = pred_answer[0]
        else:
            pred_answer = float(pred[-1])
    else:
        pred_answer = float(pred[-1])

    if isinstance(pred_answer, str):
        try:
            pred_answer = float(pred_answer)
        except ValueError:
            pred_answer = float("inf")
    return pred_answer


def compute_accuracy(gold: list, pred: list):
    # Parameter order matches the call site above: compute_accuracy(ans_gold_list, ans_pred_list).
    acc = 0.0
    for p, g in zip(pred, gold):
        if p == g:
            acc += 1
    return acc / len(gold)


if __name__ == "__main__":
    main()

# example command

# python train_gsm8k_llama.py \
#     --model_name_or_path LoftQ/Llama-2-7b-hf-bit4-rank64-backbone \
#     --adapter_name_or_path LoftQ/Llama-2-7b-hf-bit4-rank64-adapters \
#     --output_dir exp_results/gsm8k/llama-2-7b/bit4-rank64/lr3e-4 \
#     --learning_rate 1e-4 \
#     --seed 202 \
#     --dataset_name gsm8k \
#     --dataset_config main \
#     --pad_to_max_length \
#     --max_source_length 128 \
#     --max_target_length 256 \
#     --num_train_epochs 5 \
#     --per_device_train_batch_size 4 \
#     --per_device_eval_batch_size 4 \
#     --gradient_accumulation_steps 4 \
#     --with_tracking \
#     --report_to tensorboard
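# For reference, how `extract_answer_number` behaves on typical model outputs
# (illustrative values, derived from the regex and fallback logic above):
#   extract_answer_number("The final answer is 42")        -> 42.0
#   extract_answer_number("6 eggs sell for 12 dollars")    -> 12.0  (falls back to the last number)
#   extract_answer_number("no numbers here")               -> inf   (sentinel for "no prediction")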
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/loftq_finetuning/quantize_save_load.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os import torch import torch.nn as nn from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, ) from peft import LoftQConfig, LoraConfig, PeftModel, TaskType, get_peft_model class Shell(nn.Module): def __init__(self, weight, bias=None): super().__init__() self.weight = nn.Parameter(weight, requires_grad=False) if bias is not None: self.bias = nn.Parameter(bias, requires_grad=False) def unwarap_model(model, sub_module_name=".base_layer"): sub_module_name_list = [k.split(sub_module_name)[0] for k in model.state_dict().keys() if sub_module_name in k] sub_module_name_set = set(sub_module_name_list) for name in sub_module_name_set: # get the parent of the submodule name_parent = ".".join(name.split(".")[:-1]) name_child = name.split(".")[-1] sub_module = model.get_submodule(name_parent) print(sub_module) # replace with shell child = getattr(sub_module, name_child) weight = getattr(child.base_layer, "weight", None) bias = getattr(child.base_layer, "bias", None) shell = Shell(weight, bias) setattr(sub_module, name_child, shell) print("You have unwrapped the model. 
Use it at your own risk.")


def print_model(model, name):
    print("=" * 10 + name + "=" * 10)
    print(model)
    for name, param in model.named_parameters():
        if torch.is_tensor(param):
            if param.dtype in [torch.float32, torch.float16]:
                print(
                    name,
                    param.shape,
                    param.device,
                    param.dtype,
                    param.requires_grad,
                    param.mean().item(),
                    param.max().item(),
                )
            else:
                print(name, param.shape, param.device, param.dtype, param.requires_grad)


def arg_parse():
    parser = argparse.ArgumentParser(description="Quantize a model with LoftQ.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="The name or path of the fp32/16 model.",
    )
    parser.add_argument(
        "--token",
        type=str,
        default=None,
        help="The access token to download model from HuggingFace Hub.",
    )
    parser.add_argument(
        "--bits",
        type=int,
        default=4,
        help="The number of bits to quantize to.",
    )
    parser.add_argument(
        "--iter",
        type=int,
        default=1,
        help="The number of alternating steps in LoftQ.",
    )
    parser.add_argument(
        "--rank",
        type=int,
        default=16,
        help="The rank of the LoRA adapter.",
    )
    parser.add_argument(
        "--save_dir",
        type=str,
        default="./model_zoo/loftq/",
        help="The directory to save the quantized backbone and LoRA adapters.",
    )
    args = parser.parse_args()
    return args


def quantize_and_save():
    args = arg_parse()

    # Download weights and configure LoRA
    tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True)
    if any(name in args.model_name_or_path.lower() for name in ["llama", "mistral", "falcon"]):
        model = AutoModelForCausalLM.from_pretrained(
            args.model_name_or_path, token=args.token, trust_remote_code=True, device_map="auto"
        )
        task_type = TaskType.CAUSAL_LM
        target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "down_proj", "gate_proj"]

    elif any(name in args.model_name_or_path.lower() for name in ["bart", "t5"]):
        model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, token=args.token, device_map="auto")
        task_type = TaskType.SEQ_2_SEQ_LM
        target_modules = ["q_proj", "k_proj", "v_proj", "fc1", "fc2", "out_proj"]

    elif any(name in args.model_name_or_path.lower() for name in ["deberta", "roberta", "bert"]):
        model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, token=args.token)
        model = model.cuda()
        task_type = TaskType.SEQ_CLS
        target_modules = ["query_proj", "key_proj", "value_proj", "dense"]  # embeddings not supported by peft
    else:
        raise NotImplementedError("Other models not supported yet.")

    # Config of LoftQ
    loftq_config = LoftQConfig(loftq_bits=args.bits, loftq_iter=args.iter)

    lora_config = LoraConfig(
        task_type=task_type,
        inference_mode=True,
        r=args.rank,
        lora_alpha=16 if task_type is TaskType.CAUSAL_LM else args.rank,
        lora_dropout=0.1,
        target_modules=target_modules,
        init_lora_weights="loftq",
        loftq_config=loftq_config,
    )

    # Obtain LoftQ model
    lora_model = get_peft_model(model, lora_config)
    base_model = lora_model.get_base_model()

    # Save LoftQ model
    model_name = args.model_name_or_path.split("/")[-1] + f"-{args.bits}bit" + f"-{args.rank}rank"
    base_model_dir = os.path.join(args.save_dir, model_name)
    lora_model_dir = os.path.join(args.save_dir, model_name, "loft_init")

    # save lora adapters first
    lora_model.base_model.peft_config[
        "default"
    ].base_model_name_or_path = base_model_dir  # This can be a local path or Hub model id
    lora_model.base_model.peft_config["default"].init_lora_weights = True  # Don't apply LoftQ when loading again

    lora_model.save_pretrained(lora_model_dir)
    print_model(lora_model, "lora_model")

    # remove lora adapters and save the backbone
    unwarap_model(base_model)
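    # After `unwarap_model`, every LoRA layer's `base_layer` has been replaced by a plain
    # `Shell` module holding only the LoftQ-initialized weight (and bias), so the
    # `save_pretrained` call below writes an adapter-free backbone checkpoint.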
base_model.save_pretrained(base_model_dir)
    tokenizer.save_pretrained(base_model_dir)

    print_model(base_model, "base_model")

    return base_model_dir, lora_model_dir


def load_loftq(base_model_path, lora_adapter_path):
    if any(name in base_model_path.lower() for name in ["llama", "mistral", "falcon"]):
        model = AutoModelForCausalLM.from_pretrained(
            base_model_path,
            device_map="auto",
            low_cpu_mem_usage=True,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=False,
                bnb_4bit_quant_type="nf4",
            ),
        )
    elif any(name in base_model_path.lower() for name in ["bart", "t5"]):
        model = AutoModelForSeq2SeqLM.from_pretrained(
            base_model_path,
            device_map="auto",
            low_cpu_mem_usage=True,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=False,
                bnb_4bit_quant_type="nf4",
            ),
        )
    elif any(name in base_model_path.lower() for name in ["deberta", "roberta", "bert"]):
        model = AutoModelForSequenceClassification.from_pretrained(
            base_model_path,
            low_cpu_mem_usage=True,
            quantization_config=BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=False,
                bnb_4bit_quant_type="nf4",
            ),
        )
    else:
        raise NotImplementedError("Other models not supported yet.")

    lora_model = PeftModel.from_pretrained(model, lora_adapter_path, is_trainable=True)

    # Do training or inference below
    print_model(lora_model, "lora_model")
    print_model(model, "base_model")


if __name__ == "__main__":
    base_dir, lora_dir = quantize_and_save()
    load_loftq(base_dir, lora_dir)

# example command:
# python quantize_save_load.py \
# --model_name_or_path meta-llama/Llama-2-7b-hf \
# --token XXX \
# --bits 4 --iter 5 --rank 16 \
# --save_dir ./model_zoo/loftq/
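# For reference, the artifacts written by `quantize_and_save` with the command above
# would land in (illustrative paths, derived from --save_dir and the model name):
#   ./model_zoo/loftq/Llama-2-7b-hf-4bit-16rank/            <- LoRA-free backbone + tokenizer
#   ./model_zoo/loftq/Llama-2-7b-hf-4bit-16rank/loft_init/  <- LoftQ-initialized LoRA adapters
# `load_loftq` then reloads the backbone in 4-bit NF4 and attaches the adapters with
# `PeftModel.from_pretrained(..., is_trainable=True)`.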
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/oft_dreambooth/train_dreambooth.py
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import get_peft_model from peft.tuners.oft.config import OFTConfig # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"] # , "ff.net.0.proj"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior 
preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # oft args parser.add_argument("--use_oft", action="store_true", help="Whether to use OFT for parameter efficient tuning") parser.add_argument("--oft_r", type=int, default=8, help="OFT rank, only used if use_oft is True") parser.add_argument("--oft_alpha", type=int, default=32, help="OFT alpha, only used if use_oft is True") parser.add_argument("--oft_dropout", type=float, default=0.0, help="OFT dropout, only used if use_oft is True") parser.add_argument( "--oft_use_coft", action="store_true", help="Using constrained OFT, only used if use_oft is True" ) parser.add_argument( "--oft_eps", type=float, default=0.0, help="The control strength of COFT. Only has an effect if `oft_use_coft` is set to True.", ) parser.add_argument( "--oft_text_encoder_r", type=int, default=8, help="OFT rank for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_alpha", type=int, default=32, help="OFT alpha for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_dropout", type=float, default=0.0, help="OFT dropout for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_use_coft", action="store_true", help="Using constrained OFT on the text encoder, only used if use_oft is True", ) parser.add_argument( "--oft_text_encoder_eps", type=float, default=0.0, help="The control strength of COFT on the text encoder. Only has an effect if `oft_text_encoder_use_coft` is set to True.", ) parser.add_argument( "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader." ) parser.add_argument( "--no_tracemalloc", default=False, action="store_true", help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." 
) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. 
For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = 
torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        self.cpu_end = self.cpu_mem_used()
        self.cpu_used = b2mb(self.cpu_end - self.cpu_begin)
        self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


class DreamBoothDataset(Dataset):
    """
    A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
    It pre-processes the images and tokenizes the prompts.
    """

    def __init__(
        self,
        instance_data_root,
        instance_prompt,
        tokenizer,
        class_data_root=None,
        class_prompt=None,
        size=512,
        center_crop=False,
    ):
        self.size = size
        self.center_crop = center_crop
        self.tokenizer = tokenizer

        self.instance_data_root = Path(instance_data_root)
        if not self.instance_data_root.exists():
            raise ValueError("Instance images root doesn't exist.")

        self.instance_images_path = list(Path(instance_data_root).iterdir())
        self.num_instance_images = len(self.instance_images_path)
        self.instance_prompt = instance_prompt
        self._length = self.num_instance_images

        if class_data_root is not None:
            self.class_data_root = Path(class_data_root)
            self.class_data_root.mkdir(parents=True, exist_ok=True)
            self.class_images_path = list(self.class_data_root.iterdir())
            self.num_class_images = len(self.class_images_path)
            self._length = max(self.num_class_images, self.num_instance_images)
            self.class_prompt = class_prompt
        else:
            self.class_data_root = None

        self.image_transforms = transforms.Compose(
            [
                transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
                transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
                transforms.ToTensor(),
                transforms.Normalize([0.5], [0.5]),
            ]
        )

    def __len__(self):
        return self._length

    def __getitem__(self, index):
        example = {}
        instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
        if not instance_image.mode == "RGB":
            instance_image = instance_image.convert("RGB")
        example["instance_images"] = self.image_transforms(instance_image)
        example["instance_prompt_ids"] = self.tokenizer(
            self.instance_prompt,
            truncation=True,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        ).input_ids

        if self.class_data_root:
            class_image = Image.open(self.class_images_path[index % self.num_class_images])
            if not class_image.mode == "RGB":
                class_image = class_image.convert("RGB")
            example["class_images"] = self.image_transforms(class_image)
            example["class_prompt_ids"] = self.tokenizer(
                self.class_prompt,
                truncation=True,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                return_tensors="pt",
            ).input_ids

        return example


def collate_fn(examples, with_prior_preservation=False):
    input_ids = [example["instance_prompt_ids"] for example in examples]
    pixel_values = [example["instance_images"] for example in examples]

    # Concat class and instance examples for prior preservation.
    # We do this to avoid doing two forward passes.
    if with_prior_preservation:
        input_ids += [example["class_prompt_ids"] for example in examples]
        pixel_values += [example["class_images"] for example in examples]

    pixel_values = torch.stack(pixel_values)
    pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()

    input_ids = torch.cat(input_ids, dim=0)

    batch = {
        "input_ids": input_ids,
        "pixel_values": pixel_values,
    }
    return batch


class PromptDataset(Dataset):
    "A simple dataset to prepare the prompts to generate class images on multiple GPUs."
def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": import wandb wandb.login(key=args.wandb_key) wandb.init(project=args.wandb_project_name) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. 
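    # Prior preservation regularizes DreamBooth fine-tuning by also training on generated
    # images of the generic class (from `--class_prompt`), so the model keeps its class
    # prior while learning the new instance; the block below tops up `class_data_dir`
    # with generated samples until `num_class_images` images exist.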
if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) # noqa: F841 with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, ) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.use_oft: config = OFTConfig( r=args.oft_r, alpha=args.oft_alpha, target_modules=UNET_TARGET_MODULES, module_dropout=args.oft_dropout, init_weights=True, coft=args.oft_use_coft, eps=args.oft_eps, ) unet = get_peft_model(unet, config) unet.print_trainable_parameters() print(unet) 
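    # With OFT, only the injected orthogonal (rotation) parameters are trainable, so
    # `print_trainable_parameters()` above should report a small fraction of the UNet's
    # total parameter count.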
vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) elif args.train_text_encoder and args.use_oft: config = OFTConfig( r=args.oft_text_encoder_r, alpha=args.oft_text_encoder_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, module_dropout=args.oft_text_encoder_dropout, init_weights=True, coft=args.oft_text_encoder_use_coft, eps=args.oft_text_encoder_eps, ) text_encoder = get_peft_model(text_encoder, config) text_encoder.print_trainable_parameters() print(text_encoder) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # below fails when using oft so commenting it out if args.train_text_encoder and not args.use_oft: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.num_dataloader_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
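    # (`accelerator.prepare` wraps the modules/optimizer/dataloader for the current
    # distributed and mixed-precision setup; the text encoder is only passed through
    # when it is being trained, otherwise it stays a frozen plain module.)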
if args.train_text_encoder:
        unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, text_encoder, optimizer, train_dataloader, lr_scheduler
        )
    else:
        unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
            unet, optimizer, train_dataloader, lr_scheduler
        )

    # For mixed precision training we cast the text_encoder and vae weights to half-precision,
    # as these models are only used for inference; keeping the weights in full precision is not required.
    weight_dtype = torch.float32
    if accelerator.mixed_precision == "fp16":
        weight_dtype = torch.float16
    elif accelerator.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Move vae and text_encoder to device and cast to weight_dtype
    vae.to(accelerator.device, dtype=weight_dtype)
    if not args.train_text_encoder:
        text_encoder.to(accelerator.device, dtype=weight_dtype)

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if accelerator.is_main_process:
        accelerator.init_trackers("dreambooth", config=vars(args))

    # Train!
    total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num batches each epoch = {len(train_dataloader)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    global_step = 0
    first_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint != "latest":
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = os.listdir(args.output_dir)
            dirs = [d for d in dirs if d.startswith("checkpoint")]
            dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
            path = dirs[-1]
        accelerator.print(f"Resuming from checkpoint {path}")
        accelerator.load_state(os.path.join(args.output_dir, path))
        global_step = int(path.split("-")[1])

        resume_global_step = global_step * args.gradient_accumulation_steps
        first_epoch = resume_global_step // num_update_steps_per_epoch
        resume_step = resume_global_step % num_update_steps_per_epoch

    # Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if ( args.validation_prompt is not None and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0 ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." 
) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None images = [] for _ in range(args.num_validation_images): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": import wandb tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() if global_step >= args.max_train_steps: break # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage if not args.no_tracemalloc: accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print( "CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used) ) accelerator.print( "CPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.cpu_peaked) ) accelerator.print( "CPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_oft: unwrapped_unet = accelerator.unwrap_model(unet) unwrapped_unet.save_pretrained( os.path.join(args.output_dir, "unet"), state_dict=accelerator.get_state_dict(unet) ) if args.train_text_encoder: unwrapped_text_encoder = accelerator.unwrap_model(text_encoder) unwrapped_text_encoder.save_pretrained( os.path.join(args.output_dir, "text_encoder"), state_dict=accelerator.get_state_dict(text_encoder), ) else: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/oft_dreambooth/oft_dreambooth_inference.ipynb
from diffusers import DiffusionPipeline from diffusers.utils import check_min_version, get_logger from peft import PeftModel # Will error if the minimal version of diffusers is not installed. Remove at your own risk. check_min_version("0.10.0.dev0") logger = get_logger(__name__) BASE_MODEL_NAME = "stabilityai/stable-diffusion-2-1-base" ADAPTER_MODEL_PATH = "INSERT MODEL PATH HERE" pipe = DiffusionPipeline.from_pretrained( BASE_MODEL_NAME, ) pipe.to('cuda') pipe.unet = PeftModel.from_pretrained(pipe.unet, ADAPTER_MODEL_PATH + "/unet", adapter_name="default") pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, ADAPTER_MODEL_PATH + "/text_encoder", adapter_name="default") prompt = "A photo of a sks dog" image = pipe( prompt, num_inference_steps=50, height=512, width=512, ).images[0] image
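# Optional follow-up (a minimal sketch, not part of the original notebook): swap in the
# DPMSolverMultistepScheduler, as the training script does for validation, to get
# comparable samples in fewer inference steps.
from diffusers import DPMSolverMultistepScheduler

pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
image = pipe(prompt, num_inference_steps=25, height=512, width=512).images[0]
image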
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py
import os import torch import torch.nn as nn import transformers from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from peft import LoraConfig, get_peft_model os.environ["CUDA_VISIBLE_DEVICES"] = "0" # -*- coding: utf-8 -*- """Finetune-opt-bnb-peft.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1jCkpikz0J2o20FBQmYmAGdiKmJGOMo-o ## Fine-tune large models using 🤗 `peft` adapters, `transformers` & `bitsandbytes` In this tutorial we will cover how to fine-tune large language models using the `peft` library and `bitsandbytes` for loading large models in 4-bit. The fine-tuning method relies on a technique called "Low Rank Adapters" (LoRA): instead of fine-tuning the entire model, you only have to fine-tune these adapters and load them properly inside the model. After fine-tuning the model you can also share your adapters on the 🤗 Hub and load them very easily. Let's get started! ### Install requirements First, install the requirements. """ """### Model loading This script loads `facebook/opt-350m`; the original notebook used `opt-6.7b`, whose half-precision (float16) weights are about 13GB on the Hub. Loading in 4-bit needs only roughly a quarter of the half-precision memory. """ free_in_GB = int(torch.cuda.mem_get_info()[0] / 1024**3) max_memory = f"{free_in_GB-2}GB" n_gpus = torch.cuda.device_count() max_memory = {i: max_memory for i in range(n_gpus)} model = AutoModelForCausalLM.from_pretrained( "facebook/opt-350m", max_memory=max_memory, quantization_config=BitsAndBytesConfig( load_in_4bit=True, llm_int8_threshold=6.0, llm_int8_has_fp16_weight=False, bnb_4bit_compute_dtype=torch.float16, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", ), torch_dtype=torch.float16, ) tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m") """### Post-processing on the model Finally, we need to apply some post-processing on the quantized model to enable training: let's freeze all our layers and cast the layer-norm in `float32` for stability. We also cast the output of the last layer in `float32` for the same reason. """ print(model) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability param.data = param.data.to(torch.float32) # model.gradient_checkpointing_enable() # reduce number of stored activations # model.model.decoder.project_in = lambda x: x.requires_grad_(True) class CastOutputToFloat(nn.Sequential): def forward(self, x): return super().forward(x).to(torch.float32) model.lm_head = CastOutputToFloat(model.lm_head) """### Apply LoRA Here comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using the `get_peft_model` utility function from `peft`. """ def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model.
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) config = LoraConfig( r=64, lora_alpha=32, target_modules=["q_proj", "v_proj", "out_proj", "fc1", "fc2"], lora_dropout=0.01, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) print_trainable_parameters(model) # Verifying the datatypes. dtypes = {} for _, p in model.named_parameters(): dtype = p.dtype if dtype not in dtypes: dtypes[dtype] = 0 dtypes[dtype] += p.numel() total = 0 for k, v in dtypes.items(): total += v for k, v in dtypes.items(): print(k, v, v / total) """### Training""" data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=10, max_steps=20, learning_rate=3e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train() # from huggingface_hub import notebook_login # notebook_login() # model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True) """## Load adapters from the Hub You can also directly load adapters from the Hub using the commands below: """ # import torch # from peft import PeftModel, PeftConfig # from transformers import AutoModelForCausalLM, AutoTokenizer # # peft_model_id = "ybelkada/opt-6.7b-lora" # config = PeftConfig.from_pretrained(peft_model_id) # model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map='auto') # tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # ## Load the Lora model # model = PeftModel.from_pretrained(model, peft_model_id) # # """## Inference # # You can then directly use the trained model or the model that you have loaded from the 🤗 Hub for inference as you would do it usually in `transformers`. # """ # batch = tokenizer("Two things are infinite: ", return_tensors="pt") model.config.use_cache = False # silence the warnings. Please re-enable for inference! model.eval() with torch.cuda.amp.autocast(): output_tokens = model.generate(**batch, max_new_tokens=50) print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True)) # model.save('./test.pt') # """As you can see by fine-tuning for few steps we have almost recovered the quote from Albert Einstein that is present in the [training data](https://huggingface.co/datasets/Abirate/english_quotes)."""
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/stable_diffusion/train_dreambooth.py
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model # Will error if the minimal version of diffusers is not installed. Remove at your own risk. check_min_version("0.10.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = [ "to_q", "to_k", "to_v", "proj", "proj_in", "proj_out", "conv", "conv1", "conv2", "conv_shortcut", "to_out.0", "time_emb_proj", "ff.net.2", ] TEXT_ENCODER_TARGET_MODULES = ["fc1", "fc2", "q_proj", "k_proj", "v_proj", "out_proj"] def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def create_unet_adapter_config(args: argparse.Namespace) -> Union[LoraConfig, LoHaConfig, LoKrConfig]: if args.adapter == "full": raise ValueError("Cannot create unet adapter config for full-parameter training") if args.adapter == "lora": config = LoraConfig( r=args.unet_r, lora_alpha=args.unet_alpha, target_modules=UNET_TARGET_MODULES, lora_dropout=args.unet_dropout, bias=args.unet_bias, init_lora_weights=True, ) elif args.adapter == "loha": config = LoHaConfig( r=args.unet_r, alpha=args.unet_alpha, target_modules=UNET_TARGET_MODULES, rank_dropout=args.unet_rank_dropout, module_dropout=args.unet_module_dropout, use_effective_conv2d=args.unet_use_effective_conv2d, init_weights=True, ) elif args.adapter == "lokr": config = LoKrConfig( r=args.unet_r, alpha=args.unet_alpha, target_modules=UNET_TARGET_MODULES, rank_dropout=args.unet_rank_dropout, module_dropout=args.unet_module_dropout, use_effective_conv2d=args.unet_use_effective_conv2d, decompose_both=args.unet_decompose_both, decompose_factor=args.unet_decompose_factor, init_weights=True, ) else: raise ValueError(f"Unknown adapter type {args.adapter}") return config def create_text_encoder_adapter_config(args: argparse.Namespace) -> Union[LoraConfig, LoHaConfig, LoKrConfig]: if args.adapter == "full": raise ValueError("Cannot create text_encoder adapter config for full-parameter training") if args.adapter == "lora": config = LoraConfig( r=args.te_r, lora_alpha=args.te_alpha,
target_modules=TEXT_ENCODER_TARGET_MODULES, lora_dropout=args.te_dropout, bias=args.te_bias, init_lora_weights=True, ) elif args.adapter == "loha": config = LoHaConfig( r=args.te_r, alpha=args.te_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, rank_dropout=args.te_rank_dropout, module_dropout=args.te_module_dropout, init_weights=True, ) elif args.adapter == "lokr": config = LoKrConfig( r=args.te_r, alpha=args.te_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, rank_dropout=args.te_rank_dropout, module_dropout=args.te_module_dropout, decompose_both=args.te_decompose_both, decompose_factor=args.te_decompose_factor, init_weights=True, ) else: raise ValueError(f"Unknown adapter type {args.adapter}") return config def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of update steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If `--report_to` is set to wandb, the wandb API key used for login."), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If `--report_to` is set to wandb, the wandb project name used for log tracking."), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) # Adapter arguments subparsers = parser.add_subparsers(dest="adapter") # Dummy subparser to train whole model subparsers.add_parser("full", help="Train full model without adapters") # LoRA adapter lora = subparsers.add_parser("lora", help="Use LoRA adapter") lora.add_argument("--unet_r", type=int, default=8, help="LoRA rank for unet") lora.add_argument("--unet_alpha", type=int, default=8, help="LoRA alpha for unet") lora.add_argument("--unet_dropout", type=float, default=0.0, help="LoRA dropout probability for unet") lora.add_argument( "--unet_bias", type=str, default="none", help="Bias type for LoRA.
Can be 'none', 'all' or 'lora_only'", ) lora.add_argument( "--te_r", type=int, default=8, help="LoRA rank for text_encoder, only used if `train_text_encoder` is True" ) lora.add_argument( "--te_alpha", type=int, default=8, help="LoRA alpha for text_encoder, only used if `train_text_encoder` is True", ) lora.add_argument( "--te_dropout", type=float, default=0.0, help="LoRA dropout probability for text_encoder, only used if `train_text_encoder` is True", ) lora.add_argument( "--te_bias", type=str, default="none", help="Bias type for LoRA. Can be 'none', 'all' or 'lora_only', only used if `train_text_encoder` is True", ) # LoHa adapter loha = subparsers.add_parser("loha", help="Use LoHa adapter") loha.add_argument("--unet_r", type=int, default=8, help="LoHa rank for unet") loha.add_argument("--unet_alpha", type=int, default=8, help="LoHa alpha for unet") loha.add_argument("--unet_rank_dropout", type=float, default=0.0, help="LoHa rank_dropout probability for unet") loha.add_argument( "--unet_module_dropout", type=float, default=0.0, help="LoHa module_dropout probability for unet" ) loha.add_argument( "--unet_use_effective_conv2d", action="store_true", help="Use parameter effective decomposition in unet for Conv2d 3x3 with ksize > 1", ) loha.add_argument( "--te_r", type=int, default=8, help="LoHa rank for text_encoder, only used if `train_text_encoder` is True" ) loha.add_argument( "--te_alpha", type=int, default=8, help="LoHa alpha for text_encoder, only used if `train_text_encoder` is True", ) loha.add_argument( "--te_rank_dropout", type=float, default=0.0, help="LoHa rank_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) loha.add_argument( "--te_module_dropout", type=float, default=0.0, help="LoHa module_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) # LoKr adapter lokr = subparsers.add_parser("lokr", help="Use LoKr adapter") lokr.add_argument("--unet_r", type=int, default=8, help="LoKr rank for unet") lokr.add_argument("--unet_alpha", type=int, default=8, help="LoKr alpha for unet") lokr.add_argument("--unet_rank_dropout", type=float, default=0.0, help="LoKr rank_dropout probability for unet") lokr.add_argument( "--unet_module_dropout", type=float, default=0.0, help="LoKr module_dropout probability for unet" ) lokr.add_argument( "--unet_use_effective_conv2d", action="store_true", help="Use parameter effective decomposition in unet for Conv2d 3x3 with ksize > 1", ) lokr.add_argument( "--unet_decompose_both", action="store_true", help="Decompose left matrix in kronecker product for unet" ) lokr.add_argument( "--unet_decompose_factor", type=int, default=-1, help="Decompose factor in kronecker product for unet" ) lokr.add_argument( "--te_r", type=int, default=8, help="LoKr rank for text_encoder, only used if `train_text_encoder` is True" ) lokr.add_argument( "--te_alpha", type=int, default=8, help="LoKr alpha for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_rank_dropout", type=float, default=0.0, help="LoKr rank_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_module_dropout", type=float, default=0.0, help="LoKr module_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_decompose_both", action="store_true", help="Decompose left matrix in kronecker product for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_decompose_factor", 
type=int, default=-1, help="Decompose factor in kronecker product for text_encoder, only used if `train_text_encoder` is True", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify a prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and tokenizes the prompts.
""" def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(examples, with_prior_preservation=False): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": import wandb wandb.login(key=args.wandb_key) wandb.init(project=args.wandb_project_name) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. 
if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) # noqa: F841 with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, ) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.adapter != "full": config = create_unet_adapter_config(args) unet = get_peft_model(unet, config) unet.print_trainable_parameters() print(unet) vae.requires_grad_(False) if not args.train_text_encoder: text_encoder.requires_grad_(False) elif args.train_text_encoder and 
args.adapter != "full": config = create_text_encoder_adapter_config(args) text_encoder = get_peft_model(text_encoder, config) text_encoder.print_trainable_parameters() print(text_encoder) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() if args.train_text_encoder and not args.adapter != "full": text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=1, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers are initialized automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = resume_global_step // num_update_steps_per_epoch resume_step = resume_global_step % num_update_steps_per_epoch # Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() with TorchTracemalloc() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 # if global_step % args.checkpointing_steps == 0: # if accelerator.is_main_process: # save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") # accelerator.save_state(save_path) # logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if ( args.validation_prompt is not None and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0 ): logger.info( f"Running validation... 
\n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # Set evaluation mode pipeline.unet.eval() pipeline.text_encoder.eval() # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None images = [] for _ in range(args.num_validation_images): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": import wandb tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) # Set back to training mode pipeline.unet.train() pipeline.text_encoder.train() del pipeline torch.cuda.empty_cache() if global_step >= args.max_train_steps: break # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print("CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used)) accelerator.print("CPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.cpu_peaked)) accelerator.print( "CPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) # Create the pipeline using the trained modules and save it.
accelerator.wait_for_everyone() if accelerator.is_main_process: if args.adapter != "full": unwrapped_unet = accelerator.unwrap_model(unet) unwrapped_unet.save_pretrained( os.path.join(args.output_dir, "unet"), state_dict=accelerator.get_state_dict(unet) ) if args.train_text_encoder: unwrapped_text_encoder = accelerator.unwrap_model(text_encoder) unwrapped_text_encoder.save_pretrained( os.path.join(args.output_dir, "text_encoder"), state_dict=accelerator.get_state_dict(text_encoder), ) else: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
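# A minimal inference sketch for the adapters saved above (assumptions: the script was run
# with one of the lora/loha/lokr subcommands, so `output_dir` contains a `unet/` subfolder
# and, if `--train_text_encoder` was set, a `text_encoder/` subfolder; the paths below are
# placeholders). Run this separately after training:
#
# from diffusers import DiffusionPipeline
# from peft import PeftModel
#
# pipe = DiffusionPipeline.from_pretrained("path/to/pretrained_model").to("cuda")
# pipe.unet = PeftModel.from_pretrained(pipe.unet, "path/to/output_dir/unet", adapter_name="default")
# pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, "path/to/output_dir/text_encoder", adapter_name="default")
# pipe("A photo of a sks dog", num_inference_steps=50).images[0].save("sks_dog.png")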
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization # Default kohya_ss LoRA replacement modules # https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661 UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"] UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"] TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"] PREFIX_UNET = "lora_unet" PREFIX_TEXT_ENCODER = "lora_te" @dataclass class LoRAInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lora_A: Optional[torch.Tensor] = None lora_B: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.lora_A is None or self.lora_B is None: raise ValueError("At least one of lora_A or lora_B is None, they must both be provided") return { f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A, f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B, } @dataclass class LoHaInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None hada_w1_a: Optional[torch.Tensor] = None hada_w1_b: Optional[torch.Tensor] = None hada_w2_a: Optional[torch.Tensor] = None hada_w2_b: Optional[torch.Tensor] = None hada_t1: Optional[torch.Tensor] = None hada_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.hada_w1_a is None or self.hada_w1_b is None or self.hada_w2_a is None or self.hada_w2_b is None: raise ValueError( "At least one of hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b is missing, they all must be provided" ) state_dict = { f"base_model.model.{self.peft_key}.hada_w1_a": self.hada_w1_a, f"base_model.model.{self.peft_key}.hada_w1_b": self.hada_w1_b, f"base_model.model.{self.peft_key}.hada_w2_a": self.hada_w2_a, f"base_model.model.{self.peft_key}.hada_w2_b": self.hada_w2_b, } if not ( (self.hada_t1 is None and self.hada_t2 is None) or (self.hada_t1 is not None and self.hada_t2 is not None) ): raise ValueError("hada_t1 and hada_t2 must be either both present or not present at the same time") if self.hada_t1 is not None and self.hada_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.hada_t1"] = self.hada_t1 state_dict[f"base_model.model.{self.peft_key}.hada_t2"] = self.hada_t2 return state_dict @dataclass class LoKrInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lokr_w1: Optional[torch.Tensor] = None lokr_w1_a: Optional[torch.Tensor] = None lokr_w1_b: Optional[torch.Tensor] = None lokr_w2: Optional[torch.Tensor] = None lokr_w2_a: Optional[torch.Tensor] = None lokr_w2_b: Optional[torch.Tensor] = None lokr_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if (self.lokr_w1 is None) and ((self.lokr_w1_a is None) or (self.lokr_w1_b is None)): raise ValueError("Either lokr_w1 or both lokr_w1_a and lokr_w1_b should be provided") if (self.lokr_w2 is None) and ((self.lokr_w2_a is None) or (self.lokr_w2_b is None)): raise ValueError("Either lokr_w2 or both lokr_w2_a and lokr_w2_b should be
provided") state_dict = {} if self.lokr_w1 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1"] = self.lokr_w1 elif self.lokr_w1_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1_a"] = self.lokr_w1_a state_dict[f"base_model.model.{self.peft_key}.lokr_w1_b"] = self.lokr_w1_b if self.lokr_w2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2"] = self.lokr_w2 elif self.lokr_w2_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2_a"] = self.lokr_w2_a state_dict[f"base_model.model.{self.peft_key}.lokr_w2_b"] = self.lokr_w2_b if self.lokr_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_t2"] = self.lokr_t2 return state_dict def construct_peft_loraconfig(info: Dict[str, LoRAInfo], **kwargs) -> LoraConfig: """Constructs LoraConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint Returns: LoraConfig: config for constructing LoRA """ # Unpack all ranks and alphas ranks = {key: val.rank for key, val in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) lora_alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != lora_alpha, alphas.items()), key=lambda x: x[0])) config = LoraConfig( r=r, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=0.0, bias="none", init_lora_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, ) return config def construct_peft_lohaconfig(info: Dict[str, LoHaInfo], **kwargs) -> LoHaConfig: """Constructs LoHaConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint Returns: LoHaConfig: config for constructing LoHA """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any(((val.hada_t1 is not None) or (val.hada_t2 is not None) for val in info.values())) config = LoHaConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, ) return config def construct_peft_lokrconfig(info: Dict[str, LoKrInfo], decompose_factor: int = -1, **kwargs) -> LoKrConfig: """Constructs LoKrConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint Returns: LoKrConfig: config for constructing LoKr """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in 
info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any(((val.lokr_t2 is not None) for val in info.values())) # decompose_both should be enabled if any w1 matrix in any layer is decomposed into 2 decompose_both = any((val.lokr_w1_a is not None and val.lokr_w1_b is not None) for val in info.values()) # Determining decompose factor is a bit tricky (but it is most often -1) # Check that decompose_factor is equal to provided for val in info.values(): # Determine shape of first matrix if val.lokr_w1 is not None: w1_shape = tuple(val.lokr_w1.shape) else: w1_shape = (val.lokr_w1_a.shape[0], val.lokr_w1_b.shape[1]) # Determine shape of second matrix if val.lokr_w2 is not None: w2_shape = tuple(val.lokr_w2.shape[:2]) elif val.lokr_t2 is not None: w2_shape = (val.lokr_w2_a.shape[1], val.lokr_w2_b.shape[1]) else: # We may iterate over Conv2d layer, for which second item in shape is multiplied by ksize^2 w2_shape = (val.lokr_w2_a.shape[0], val.lokr_w2_b.shape[1]) # We need to check, whether decompose_factor is really -1 or not shape = (w1_shape[0], w2_shape[0]) if factorization(shape[0] * shape[1], factor=-1) != shape: raise ValueError("Cannot infer decompose_factor, probably it is not equal to -1") config = LoKrConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, decompose_both=decompose_both, decompose_factor=decompose_factor, ) return config def combine_peft_state_dict(info: Dict[str, Union[LoRAInfo, LoHaInfo]]) -> Dict[str, torch.Tensor]: result = {} for key_info in info.values(): result.update(key_info.peft_state_dict()) return result def detect_adapter_type(keys: List[str]) -> PeftType: # Detect type of adapter by keys # Inspired by this: # https://github.com/bmaltais/kohya_ss/blob/ed4e3b0239a40506de9a17e550e6cf2d0b867a4f/tools/lycoris_utils.py#L312 for key in keys: if "alpha" in key: continue elif any(x in key for x in ["lora_down", "lora_up"]): # LoRA return PeftType.LORA elif any(x in key for x in ["hada_w1", "hada_w2", "hada_t1", "hada_t2"]): # LoHa may have the following keys: # hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b, hada_t1, hada_t2 return PeftType.LOHA elif any(x in key for x in ["lokr_w1", "lokr_w2", "lokr_t1", "lokr_t2"]): # LoKr may have the following keys: # lokr_w1, lokr_w2, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t1, lokr_t2 return PeftType.LOKR elif "diff" in key: raise ValueError("Currently full diff adapters are not implemented") else: raise ValueError("Unkown adapter type, probably not implemented") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use") parser.add_argument( "--adapter_path", default=None, type=str, required=True, help="Path to downloaded adapter to convert", ) 
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output peft adapter.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--loha_conv2d_weights_fix", action="store_true", help="""LoHa checkpoints trained with lycoris-lora<=1.9.0 contain a bug described in this PR https://github.com/KohakuBlueleaf/LyCORIS/pull/115. This option fixes this bug during weight conversion (replaces hada_t2 with hada_t1 for Conv2d 3x3 layers). The output results may differ from webui, but in general, they should be better in terms of quality. This option should be set to True in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR wasn't merged. This option should be set to False in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR is merged or full compatibility with webui outputs is required.""", ) args = parser.parse_args() # Load all models that we need to add adapter to text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder") unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet") # Construct possible mapping from kohya keys to peft keys models_keys = {} for model, model_key, model_name in [ (text_encoder, PREFIX_TEXT_ENCODER, "text_encoder"), (unet, PREFIX_UNET, "unet"), ]: models_keys.update( { f"{model_key}.{peft_key}".replace(".", "_"): peft_key for peft_key in (x[0] for x in model.named_modules()) } ) # Store conversion info (model_type -> peft_key -> LoRAInfo | LoHaInfo | LoKrInfo) adapter_info: Dict[str, Dict[str, Union[LoRAInfo, LoHaInfo, LoKrInfo]]] = { "text_encoder": {}, "unet": {}, } # Store decompose_factor for LoKr decompose_factor = -1 # Open adapter checkpoint with safetensors.safe_open(args.adapter_path, framework="pt", device="cpu") as f: # Extract information about adapter structure metadata = f.metadata() # It may be difficult to determine rank for LoKr adapters # If checkpoint was trained with large rank it may not be utilized during weights creation at all # So we need to get it from checkpoint metadata (along with decompose_factor) rank, conv_rank = None, None if metadata is not None: rank = metadata.get("ss_network_dim", None) rank = int(rank) if rank else None if "ss_network_args" in metadata: network_args = json.loads(metadata["ss_network_args"]) conv_rank = network_args.get("conv_dim", None) conv_rank = int(conv_rank) if conv_rank else rank decompose_factor = network_args.get("factor", -1) decompose_factor = int(decompose_factor) # Detect adapter type based on keys adapter_type = detect_adapter_type(f.keys()) adapter_info_cls = { PeftType.LORA: LoRAInfo, PeftType.LOHA: LoHaInfo, PeftType.LOKR: LoKrInfo, }[adapter_type] # Iterate through available info and unpack all the values for key in f.keys(): kohya_key, kohya_type = key.split(".")[:2] # Find which model this key belongs to if kohya_key.startswith(PREFIX_TEXT_ENCODER): model_type, model = "text_encoder", text_encoder elif kohya_key.startswith(PREFIX_UNET): model_type, model = "unet", unet else: raise ValueError(f"Cannot determine model for key: {key}") # Find corresponding peft key if kohya_key not in models_keys: raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}") peft_key = models_keys[kohya_key] # Retrieve corresponding layer of model layer = attrgetter(peft_key)(model) # Create a corresponding adapter info if peft_key not in 
adapter_info[model_type]: adapter_info[model_type][peft_key] = adapter_info_cls(kohya_key=kohya_key, peft_key=peft_key) tensor = f.get_tensor(key) if kohya_type == "alpha": adapter_info[model_type][peft_key].alpha = tensor.item() elif kohya_type == "lora_down": adapter_info[model_type][peft_key].lora_A = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lora_up": adapter_info[model_type][peft_key].lora_B = tensor adapter_info[model_type][peft_key].rank = tensor.shape[1] elif kohya_type == "hada_w1_a": adapter_info[model_type][peft_key].hada_w1_a = tensor elif kohya_type == "hada_w1_b": adapter_info[model_type][peft_key].hada_w1_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "hada_w2_a": adapter_info[model_type][peft_key].hada_w2_a = tensor elif kohya_type == "hada_w2_b": adapter_info[model_type][peft_key].hada_w2_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type in {"hada_t1", "hada_t2"}: if args.loha_conv2d_weights_fix: if kohya_type == "hada_t1": # This code block fixes a bug that exists for some LoHa checkpoints # that resulted in accidentally using hada_t1 weight instead of hada_t2, see # https://github.com/KohakuBlueleaf/LyCORIS/pull/115 adapter_info[model_type][peft_key].hada_t1 = tensor adapter_info[model_type][peft_key].hada_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] else: if kohya_type == "hada_t1": adapter_info[model_type][peft_key].hada_t1 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "hada_t2": adapter_info[model_type][peft_key].hada_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_t2": adapter_info[model_type][peft_key].lokr_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_w1": adapter_info[model_type][peft_key].lokr_w1 = tensor if isinstance(layer, nn.Linear) or ( isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1) ): adapter_info[model_type][peft_key].rank = rank elif isinstance(layer, nn.Conv2d): adapter_info[model_type][peft_key].rank = conv_rank elif kohya_type == "lokr_w2": adapter_info[model_type][peft_key].lokr_w2 = tensor if isinstance(layer, nn.Linear) or ( isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1) ): adapter_info[model_type][peft_key].rank = rank elif isinstance(layer, nn.Conv2d): adapter_info[model_type][peft_key].rank = conv_rank elif kohya_type == "lokr_w1_a": adapter_info[model_type][peft_key].lokr_w1_a = tensor adapter_info[model_type][peft_key].rank = tensor.shape[1] elif kohya_type == "lokr_w1_b": adapter_info[model_type][peft_key].lokr_w1_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_w2_a": adapter_info[model_type][peft_key].lokr_w2_a = tensor elif kohya_type == "lokr_w2_b": adapter_info[model_type][peft_key].lokr_w2_b = tensor else: raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}") # Get function which will create adapter config based on extracted info construct_config_fn = { PeftType.LORA: construct_peft_loraconfig, PeftType.LOHA: construct_peft_lohaconfig, PeftType.LOKR: construct_peft_lokrconfig, }[adapter_type] # Process each model sequentially for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]: config = construct_config_fn(adapter_info[model_name], decompose_factor=decompose_factor) # Output warning for LoHa with use_effective_conv2d if ( 
isinstance(config, LoHaConfig) and getattr(config, "use_effective_conv2d", False) and args.loha_conv2d_weights_fix is False ): logging.warning( 'lycoris-lora<=1.9.0 LoHa implementation contains a bug, which can be fixed with "--loha_conv2d_weights_fix".\n' "For more info, please refer to https://github.com/huggingface/peft/pull/1021 and https://github.com/KohakuBlueleaf/LyCORIS/pull/115" ) model = get_peft_model(model, config) set_peft_model_state_dict(model, combine_peft_state_dict(adapter_info[model_name])) if args.half: model.to(torch.float16) # Save model to disk model.save_pretrained(os.path.join(args.dump_path, model_name))
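
# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original script. The
# paths below are hypothetical placeholders; the PEFT wrapping mirrors the
# `get_lora_sd_pipeline` helper from the lora_dreambooth example further down
# in this repo.
def _example_load_converted_adapter(
    dump_path: str = "./peft_adapter",  # hypothetical --dump_path used during conversion
    sd_checkpoint: str = "CompVis/stable-diffusion-v1-4",  # hypothetical --sd_checkpoint
):
    from diffusers import StableDiffusionPipeline

    from peft import PeftModel

    pipe = StableDiffusionPipeline.from_pretrained(sd_checkpoint, torch_dtype=torch.float16).to("cuda")
    # Attach the converted adapters to the pipeline submodels
    pipe.unet = PeftModel.from_pretrained(pipe.unet, os.path.join(dump_path, "unet"))
    text_encoder_dir = os.path.join(dump_path, "text_encoder")
    if os.path.exists(text_encoder_dir):
        pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, text_encoder_dir)
    return pipe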
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb
import copy
import os

# ignore bnb warnings
os.environ["BITSANDBYTES_NOWELCOME"] = "1"

import peft
import torch
from torch import nn
import torch.nn.functional as F

torch.manual_seed(0)

X = torch.rand((1000, 20))
y = (X.sum(1) > 10).long()

n_train = 800
batch_size = 64

train_dataloader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(X[:n_train], y[:n_train]),
    batch_size=batch_size,
    shuffle=True,
)
eval_dataloader = torch.utils.data.DataLoader(
    torch.utils.data.TensorDataset(X[n_train:], y[n_train:]),
    batch_size=batch_size,
)

class MLP(nn.Module):
    def __init__(self, num_units_hidden=2000):
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(20, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, 2),
            nn.LogSoftmax(dim=-1),
        )

    def forward(self, X):
        return self.seq(X)

lr = 0.002
batch_size = 64
max_epochs = 30
device = 'cpu' if not torch.cuda.is_available() else 'cuda'

def train(model, optimizer, criterion, train_dataloader, eval_dataloader, epochs):
    for epoch in range(epochs):
        model.train()
        train_loss = 0
        for xb, yb in train_dataloader:
            xb = xb.to(device)
            yb = yb.to(device)
            outputs = model(xb)
            loss = criterion(outputs, yb)
            train_loss += loss.detach().float()
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        model.eval()
        eval_loss = 0
        for xb, yb in eval_dataloader:
            xb = xb.to(device)
            yb = yb.to(device)
            with torch.no_grad():
                outputs = model(xb)
            loss = criterion(outputs, yb)
            eval_loss += loss.detach().float()

        eval_loss_total = (eval_loss / len(eval_dataloader)).item()
        train_loss_total = (train_loss / len(train_dataloader)).item()
        print(f"{epoch=:<2} {train_loss_total=:.4f} {eval_loss_total=:.4f}")

module = MLP().to(device)
optimizer = torch.optim.Adam(module.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()

%time train(module, optimizer, criterion, train_dataloader, eval_dataloader, epochs=max_epochs)

[(n, type(m)) for n, m in MLP().named_modules()]

config = peft.LoraConfig(
    r=8,
    target_modules=["seq.0", "seq.2"],
    modules_to_save=["seq.4"],
)

module = MLP().to(device)
module_copy = copy.deepcopy(module)  # we keep a copy of the original model for later
peft_model = peft.get_peft_model(module, config)
optimizer = torch.optim.Adam(peft_model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()
peft_model.print_trainable_parameters()

%time train(peft_model, optimizer, criterion, train_dataloader, eval_dataloader, epochs=max_epochs)

for name, param in peft_model.base_model.named_parameters():
    if "lora" not in name:
        continue
    print(f"New parameter {name:<13} | {param.numel():>5} parameters | updated")

params_before = dict(module_copy.named_parameters())
for name, param in peft_model.base_model.named_parameters():
    if "lora" in name:
        continue
    name_before = name.partition(".")[-1].replace("original_", "").replace("module.", "").replace("modules_to_save.default.", "")
    param_before = params_before[name_before]
    if torch.allclose(param, param_before):
        print(f"Parameter {name_before:<13} | {param.numel():>7} parameters | not updated")
    else:
        print(f"Parameter {name_before:<13} | {param.numel():>7} parameters | updated")

user = "BenjaminB"  # put your user name here
model_name = "peft-lora-with-custom-model"
model_id = f"{user}/{model_name}"

peft_model.push_to_hub(model_id);

loaded = peft.PeftModel.from_pretrained(module_copy, model_id)
type(loaded)

y_peft = peft_model(X.to(device))
y_loaded = loaded(X.to(device))
torch.allclose(y_peft, y_loaded)

from huggingface_hub import delete_repo

delete_repo(model_id)
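
# Editor's sketch, not in the original notebook: fold the trained LoRA weights back
# into the base MLP with merge_and_unload, so inference no longer needs the PEFT wrapper.
merged_model = copy.deepcopy(peft_model).merge_and_unload()
y_merged = merged_model(X.to(device))
# the merged outputs should match the adapter-wrapped model up to float round-off
torch.allclose(y_peft, y_merged, atol=1e-5)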
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/multilayer_perceptron/README.md
# Fine-tuning a multilayer perceptron using LoRA and 🤗 PEFT

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb)

PEFT supports fine-tuning any type of model as long as the layers being used are supported; the model does not have to be a transformers model, for instance. To demonstrate this, the accompanying notebook `multilayer_perceptron_lora.ipynb` shows how to apply LoRA to a simple multilayer perceptron and use it to train a model to perform a classification task, as sketched below.
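
In short, the recipe from the notebook boils down to the following (condensed here for illustration; run the notebook for the full training loop):

```python
import torch
from torch import nn

import peft


class MLP(nn.Module):
    def __init__(self, num_units_hidden=2000):
        super().__init__()
        self.seq = nn.Sequential(
            nn.Linear(20, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, num_units_hidden),
            nn.ReLU(),
            nn.Linear(num_units_hidden, 2),
            nn.LogSoftmax(dim=-1),
        )

    def forward(self, X):
        return self.seq(X)


# LoRA targets layers by module name, so any custom nn.Module qualifies
config = peft.LoraConfig(r=8, target_modules=["seq.0", "seq.2"], modules_to_save=["seq.4"])
peft_model = peft.get_peft_model(MLP(), config)
peft_model.print_trainable_parameters()
```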
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/lora_dreambooth/convert_kohya_ss_sd_lora_to_peft.py
import argparse
import os
from collections import Counter
from dataclasses import dataclass
from typing import Dict, Optional

import safetensors
import torch
from diffusers import UNet2DConditionModel
from transformers import CLIPTextModel

from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict


# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661
UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
LORA_PREFIX_UNET = "lora_unet"
LORA_PREFIX_TEXT_ENCODER = "lora_te"


@dataclass
class LoRAInfo:
    kohya_key: str
    peft_key: str
    alpha: Optional[float] = None
    rank: Optional[int] = None
    lora_A: Optional[torch.Tensor] = None
    lora_B: Optional[torch.Tensor] = None

    def peft_state_dict(self) -> Dict[str, torch.Tensor]:
        if self.lora_A is None or self.lora_B is None:
            raise ValueError("At least one of lora_A or lora_B is None, they must both be provided")
        return {
            f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A,
            f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B,
        }


def construct_peft_loraconfig(info: Dict[str, LoRAInfo]) -> LoraConfig:
    """Constructs LoraConfig from data extracted from kohya checkpoint

    Args:
        info (Dict[str, LoRAInfo]): Information extracted from kohya checkpoint

    Returns:
        LoraConfig: config for constructing LoRA
    """

    # Unpack all ranks and alphas
    ranks = {x[0]: x[1].rank for x in info.items()}
    alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()}

    # Determine which modules need to be transformed
    target_modules = list(info.keys())

    # Determine most common rank and alpha
    r = Counter(ranks.values()).most_common(1)[0][0]
    lora_alpha = Counter(alphas.values()).most_common(1)[0][0]

    # Determine which modules have different rank and alpha
    rank_pattern = dict(filter(lambda x: x[1] != r, ranks.items()))
    alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, alphas.items()))

    config = LoraConfig(
        r=r,
        lora_alpha=lora_alpha,
        target_modules=target_modules,
        lora_dropout=0.0,
        bias="none",
        init_lora_weights=False,
        rank_pattern=rank_pattern,
        alpha_pattern=alpha_pattern,
    )

    return config


def combine_peft_state_dict(info: Dict[str, LoRAInfo]) -> Dict[str, torch.Tensor]:
    result = {}
    for key_name, key_info in info.items():
        result[f"base_model.model.{key_name}.lora_A.weight"] = key_info.lora_A
        result[f"base_model.model.{key_name}.lora_B.weight"] = key_info.lora_B
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use")
    parser.add_argument(
        "--kohya_lora_path", default=None, type=str, required=True, help="Path to kohya_ss trained LoRA"
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    args = parser.parse_args()

    # Load all models that we need to add adapter to
    text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder")
    unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet")

    # Construct possible mapping from kohya keys to peft keys
    models_keys = {}
    for model, model_key, model_name in [
        (text_encoder, LORA_PREFIX_TEXT_ENCODER, "text_encoder"),
        (unet, LORA_PREFIX_UNET, "unet"),
    ]:
        models_keys.update(
            {
                f"{model_key}.{peft_key}".replace(".", "_"): peft_key
                for peft_key in (x[0] for x in model.named_modules())
            }
        )

    # Store conversion info (model_type -> peft_key -> LoRAInfo)
    lora_info: Dict[str, Dict[str, LoRAInfo]] = {
        "text_encoder": {},
        "unet": {},
    }

    # Open kohya_ss checkpoint
    with safetensors.safe_open(args.kohya_lora_path, framework="pt", device="cpu") as f:
        # Extract information about LoRA structure
        metadata = f.metadata()

        # Iterate through available info and unpack all the values
        for key in f.keys():
            kohya_key, kohya_type = key.split(".")[:2]

            # Find which model this key belongs to
            if kohya_key.startswith(LORA_PREFIX_TEXT_ENCODER):
                model_type = "text_encoder"
            elif kohya_key.startswith(LORA_PREFIX_UNET):
                model_type = "unet"
            else:
                raise ValueError(f"Cannot determine model for key: {key}")

            # Find corresponding peft key
            if kohya_key not in models_keys:
                raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}")
            peft_key = models_keys[kohya_key]

            if peft_key not in lora_info[model_type]:
                lora_info[model_type][peft_key] = LoRAInfo(kohya_key=kohya_key, peft_key=peft_key)

            if kohya_type == "alpha":
                lora_info[model_type][peft_key].alpha = f.get_tensor(key).item()
            elif kohya_type == "lora_down":
                tensor = f.get_tensor(key)
                lora_info[model_type][peft_key].lora_A = tensor
                lora_info[model_type][peft_key].rank = tensor.shape[0]
            elif kohya_type == "lora_up":
                tensor = f.get_tensor(key)
                lora_info[model_type][peft_key].lora_B = tensor
                lora_info[model_type][peft_key].rank = tensor.shape[1]
            else:
                raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}")

    # Process each model
    for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]:
        config = construct_peft_loraconfig(lora_info[model_name])
        model = get_peft_model(model, config)

        keys_peft = list(get_peft_model_state_dict(model).keys())
        keys_new = list(combine_peft_state_dict(lora_info[model_name]).keys())

        set_peft_model_state_dict(model, combine_peft_state_dict(lora_info[model_name]))

        if args.half:
            model.to(torch.float16)

        # Save model to disk
        model.save_pretrained(os.path.join(args.dump_path, model_name))
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/lora_dreambooth/lora_dreambooth_inference.ipynb
import argparse
import gc
import hashlib
import itertools
import logging
import math
import os
import threading
import warnings
from pathlib import Path
from typing import Optional

import psutil
import json
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.utils.data import Dataset

import datasets
import diffusers
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel
from diffusers import DDPMScheduler, PNDMScheduler, StableDiffusionPipeline
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from diffusers.optimization import get_scheduler
from diffusers.utils import check_min_version
from diffusers.utils.import_utils import is_xformers_available
from huggingface_hub import HfFolder, Repository, whoami
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import AutoTokenizer, PretrainedConfig, CLIPFeatureExtractor

from peft import PeftModel, LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict

# Will error if the minimal version of diffusers is not installed. Remove at your own risks.
check_min_version("0.10.0.dev0")

logger = get_logger(__name__)

MODEL_NAME = "CompVis/stable-diffusion-v1-4"  # "stabilityai/stable-diffusion-2-1-base"
INSTANCE_PROMPT = "a photo of sks dog"
base_path = "/home/sourab/temp/"

def get_lora_sd_pipeline(
    ckpt_dir, base_model_name_or_path=None, dtype=torch.float16, device="cuda", adapter_name="default"
):
    unet_sub_dir = os.path.join(ckpt_dir, "unet")
    text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
    if os.path.exists(text_encoder_sub_dir) and base_model_name_or_path is None:
        config = LoraConfig.from_pretrained(text_encoder_sub_dir)
        base_model_name_or_path = config.base_model_name_or_path

    if base_model_name_or_path is None:
        raise ValueError("Please specify the base model name or path")

    pipe = StableDiffusionPipeline.from_pretrained(
        base_model_name_or_path, torch_dtype=dtype, requires_safety_checker=False
    ).to(device)
    pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)

    if os.path.exists(text_encoder_sub_dir):
        pipe.text_encoder = PeftModel.from_pretrained(
            pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
        )

    if dtype in (torch.float16, torch.bfloat16):
        pipe.unet.half()
        pipe.text_encoder.half()

    pipe.to(device)
    return pipe


def load_adapter(pipe, ckpt_dir, adapter_name):
    unet_sub_dir = os.path.join(ckpt_dir, "unet")
    text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
    pipe.unet.load_adapter(unet_sub_dir, adapter_name=adapter_name)
    if os.path.exists(text_encoder_sub_dir):
        pipe.text_encoder.load_adapter(text_encoder_sub_dir, adapter_name=adapter_name)


def set_adapter(pipe, adapter_name):
    pipe.unet.set_adapter(adapter_name)
    if isinstance(pipe.text_encoder, PeftModel):
        pipe.text_encoder.set_adapter(adapter_name)


def merging_lora_with_base(pipe, ckpt_dir, adapter_name="default"):
    unet_sub_dir = os.path.join(ckpt_dir, "unet")
    text_encoder_sub_dir = os.path.join(ckpt_dir, "text_encoder")
    if isinstance(pipe.unet, PeftModel):
        pipe.unet.set_adapter(adapter_name)
    else:
        pipe.unet = PeftModel.from_pretrained(pipe.unet, unet_sub_dir, adapter_name=adapter_name)
    pipe.unet = pipe.unet.merge_and_unload()

    if os.path.exists(text_encoder_sub_dir):
        if isinstance(pipe.text_encoder, PeftModel):
            pipe.text_encoder.set_adapter(adapter_name)
        else:
            pipe.text_encoder = PeftModel.from_pretrained(
                pipe.text_encoder, text_encoder_sub_dir, adapter_name=adapter_name
            )
        pipe.text_encoder = pipe.text_encoder.merge_and_unload()

    return pipe


def create_weighted_lora_adapter(pipe, adapters, weights, adapter_name="default"):
    pipe.unet.add_weighted_adapter(adapters, weights, adapter_name)
    if isinstance(pipe.text_encoder, PeftModel):
        pipe.text_encoder.add_weighted_adapter(adapters, weights, adapter_name)
    return pipe

%%time
pipe = get_lora_sd_pipeline(os.path.join(base_path, "dog_dreambooth_updated"), adapter_name="dog")

%%time
load_adapter(pipe, os.path.join(base_path, "toy_dreambooth"), adapter_name="toy")

pipe = create_weighted_lora_adapter(pipe, ["toy", "dog"], [1.0, 1.05], adapter_name="toy_dog")

%%time
set_adapter(pipe, adapter_name="dog")

prompt = "sks dog playing fetch in the park"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image

%%time
set_adapter(pipe, adapter_name="toy")

prompt = "narendra modi rendered in the style of <1>"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image

set_adapter(pipe, adapter_name="dog")
prompt = "sks dog in a big red bucket"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image

set_adapter(pipe, adapter_name="toy")
prompt = "superman rendered in the style of <1>, close up portrait"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image

set_adapter(pipe, adapter_name="toy_dog")
prompt = "sks dog rendered in the style of <1>, close up portrait, 4K HD"
negative_prompt = "low quality, blurry, unfinished"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7, negative_prompt=negative_prompt).images[0]
image
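
# Editor's sketch, not in the original notebook: render one prompt under each loaded
# adapter for a side-by-side comparison, reusing the set_adapter helper defined above.
images = {}
for name in ["dog", "toy", "toy_dog"]:
    set_adapter(pipe, adapter_name=name)
    images[name] = pipe(
        "sks dog rendered in the style of <1>", num_inference_steps=50, guidance_scale=7
    ).images[0]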
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/lora_dreambooth/requirements.txt
transformers
accelerate
evaluate
tqdm
datasets
diffusers
Pillow
torchvision
huggingface_hub
safetensors
wandb
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/lora_dreambooth/train_dreambooth.py
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model # Will error if the minimal version of diffusers is not installed. Remove at your own risks. check_min_version("0.10.0.dev0") logger = get_logger(__name__) UNET_TARGET_MODULES = ["to_q", "to_v", "query", "value"] # , "ff.net.0.proj"] TEXT_ENCODER_TARGET_MODULES = ["q_proj", "v_proj"] def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.") def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. 
If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." ), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # lora args parser.add_argument("--use_lora", action="store_true", help="Whether to use Lora for parameter efficient tuning") parser.add_argument("--lora_r", type=int, default=8, help="Lora rank, only used if use_lora is True") parser.add_argument("--lora_alpha", type=int, default=32, help="Lora alpha, only used if use_lora is True") parser.add_argument("--lora_dropout", type=float, default=0.0, help="Lora dropout, only used if use_lora is True") parser.add_argument( "--lora_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora is True", ) parser.add_argument( "--lora_text_encoder_r", type=int, default=8, help="Lora rank for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_alpha", type=int, default=32, help="Lora alpha for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_dropout", type=float, default=0.0, help="Lora dropout for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora and `train_text_encoder` are True", ) parser.add_argument( "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader." ) parser.add_argument( "--no_tracemalloc", default=False, action="store_true", help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. 
Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # 
print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") class DreamBoothDataset(Dataset): """ A dataset to prepare the instance and class images with the prompts for fine-tuning the model. It pre-processes the images and the tokenizes prompts. """ def __init__( self, instance_data_root, instance_prompt, tokenizer, class_data_root=None, class_prompt=None, size=512, center_crop=False, ): self.size = size self.center_crop = center_crop self.tokenizer = tokenizer self.instance_data_root = Path(instance_data_root) if not self.instance_data_root.exists(): raise ValueError("Instance images root doesn't exists.") self.instance_images_path = list(Path(instance_data_root).iterdir()) self.num_instance_images = len(self.instance_images_path) self.instance_prompt = instance_prompt self._length = self.num_instance_images if class_data_root is not None: self.class_data_root = Path(class_data_root) self.class_data_root.mkdir(parents=True, exist_ok=True) self.class_images_path = list(self.class_data_root.iterdir()) self.num_class_images = len(self.class_images_path) self._length = max(self.num_class_images, self.num_instance_images) self.class_prompt = class_prompt else: self.class_data_root = None self.image_transforms = transforms.Compose( [ transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR), transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def __len__(self): return self._length def __getitem__(self, index): example = {} instance_image = Image.open(self.instance_images_path[index % self.num_instance_images]) if not instance_image.mode == "RGB": instance_image = instance_image.convert("RGB") example["instance_images"] = self.image_transforms(instance_image) example["instance_prompt_ids"] = self.tokenizer( self.instance_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids if self.class_data_root: class_image = Image.open(self.class_images_path[index % self.num_class_images]) if not class_image.mode == "RGB": class_image = class_image.convert("RGB") example["class_images"] = self.image_transforms(class_image) example["class_prompt_ids"] = self.tokenizer( self.class_prompt, truncation=True, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", ).input_ids return example def collate_fn(examples, with_prior_preservation=False): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch class PromptDataset(Dataset): "A simple dataset to prepare the prompts to generate class images on multiple GPUs." 
def __init__(self, prompt, num_samples): self.prompt = prompt self.num_samples = num_samples def __len__(self): return self.num_samples def __getitem__(self, index): example = {} example["prompt"] = self.prompt example["index"] = index return example def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}" def main(args): logging_dir = Path(args.output_dir, args.logging_dir) accelerator = Accelerator( gradient_accumulation_steps=args.gradient_accumulation_steps, mixed_precision=args.mixed_precision, log_with=args.report_to, project_dir=logging_dir, ) if args.report_to == "wandb": import wandb wandb.login(key=args.wandb_key) wandb.init(project=args.wandb_project_name) # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models. # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate. if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1: raise ValueError( "Gradient accumulation is not supported when training the text encoder in distributed training. " "Please set gradient_accumulation_steps to 1. This feature will be supported in the future." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Generate class images if prior preservation is enabled. 
if args.with_prior_preservation: class_images_dir = Path(args.class_data_dir) if not class_images_dir.exists(): class_images_dir.mkdir(parents=True) cur_class_images = len(list(class_images_dir.iterdir())) if cur_class_images < args.num_class_images: torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32 if args.prior_generation_precision == "fp32": torch_dtype = torch.float32 elif args.prior_generation_precision == "fp16": torch_dtype = torch.float16 elif args.prior_generation_precision == "bf16": torch_dtype = torch.bfloat16 pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, torch_dtype=torch_dtype, safety_checker=None, revision=args.revision, ) pipeline.set_progress_bar_config(disable=True) num_new_images = args.num_class_images - cur_class_images logger.info(f"Number of class images to sample: {num_new_images}.") sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images for i, image in enumerate(images): hash_image = hashlib.sha1(image.tobytes()).hexdigest() image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg" image.save(image_filename) del pipeline if torch.cuda.is_available(): torch.cuda.empty_cache() # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) # noqa: F841 with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # import correct text encoder class text_encoder_cls = import_model_class_from_model_name_or_path(args.pretrained_model_name_or_path, args.revision) # Load scheduler and models noise_scheduler = DDPMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, ) # DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision) unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) if args.use_lora: config = LoraConfig( r=args.lora_r, lora_alpha=args.lora_alpha, target_modules=UNET_TARGET_MODULES, lora_dropout=args.lora_dropout, bias=args.lora_bias, ) unet = get_peft_model(unet, config) unet.print_trainable_parameters() print(unet) vae.requires_grad_(False) if 
not args.train_text_encoder: text_encoder.requires_grad_(False) elif args.train_text_encoder and args.use_lora: config = LoraConfig( r=args.lora_text_encoder_r, lora_alpha=args.lora_text_encoder_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, lora_dropout=args.lora_text_encoder_dropout, bias=args.lora_text_encoder_bias, ) text_encoder = get_peft_model(text_encoder, config) text_encoder.print_trainable_parameters() print(text_encoder) if args.enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") if args.gradient_checkpointing: unet.enable_gradient_checkpointing() # below fails when using lora so commenting it out if args.train_text_encoder and not args.use_lora: text_encoder.gradient_checkpointing_enable() # Enable TF32 for faster training on Ampere GPUs, # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices if args.allow_tf32: torch.backends.cuda.matmul.allow_tf32 = True if args.scale_lr: args.learning_rate = ( args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes ) # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs if args.use_8bit_adam: try: import bitsandbytes as bnb except ImportError: raise ImportError( "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`." ) optimizer_class = bnb.optim.AdamW8bit else: optimizer_class = torch.optim.AdamW # Optimizer creation params_to_optimize = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) optimizer = optimizer_class( params_to_optimize, lr=args.learning_rate, betas=(args.adam_beta1, args.adam_beta2), weight_decay=args.adam_weight_decay, eps=args.adam_epsilon, ) # Dataset and DataLoaders creation: train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.num_dataloader_workers, ) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( args.lr_scheduler, optimizer=optimizer, num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps, num_training_steps=args.max_train_steps * args.gradient_accumulation_steps, num_cycles=args.lr_num_cycles, power=args.lr_power, ) # Prepare everything with our `accelerator`. 
if args.train_text_encoder: unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, text_encoder, optimizer, train_dataloader, lr_scheduler ) else: unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare( unet, optimizer, train_dataloader, lr_scheduler ) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move vae and text_encoder to device and cast to weight_dtype vae.to(accelerator.device, dtype=weight_dtype) if not args.train_text_encoder: text_encoder.to(accelerator.device, dtype=weight_dtype) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # We need to initialize the trackers we use, and also store our configuration. # The trackers initialize automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("dreambooth", config=vars(args)) # Train! total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num batches each epoch = {len(train_dataloader)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") global_step = 0 first_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint != "latest": path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = os.listdir(args.output_dir) dirs = [d for d in dirs if d.startswith("checkpoint")] dirs = sorted(dirs, key=lambda x: int(x.split("-")[1])) path = dirs[-1] accelerator.print(f"Resuming from checkpoint {path}") accelerator.load_state(os.path.join(args.output_dir, path)) global_step = int(path.split("-")[1]) resume_global_step = global_step * args.gradient_accumulation_steps first_epoch = resume_global_step // num_update_steps_per_epoch resume_step = resume_global_step % num_update_steps_per_epoch # Only show the progress bar once on each machine.
progress_bar = tqdm(range(global_step, args.max_train_steps), disable=not accelerator.is_local_main_process) progress_bar.set_description("Steps") for epoch in range(first_epoch, args.num_train_epochs): unet.train() if args.train_text_encoder: text_encoder.train() with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc: for step, batch in enumerate(train_dataloader): # Skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step: if step % args.gradient_accumulation_steps == 0: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) continue with accelerator.accumulate(unet): # Convert images to latent space latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample() latents = latents * 0.18215 # Sample noise that we'll add to the latents noise = torch.randn_like(latents) bsz = latents.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device ) timesteps = timesteps.long() # Add noise to the latents according to the noise magnitude at each timestep # (this is the forward diffusion process) noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps) # Get the text embedding for conditioning encoder_hidden_states = text_encoder(batch["input_ids"])[0] # Predict the noise residual model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample # Get the target for loss depending on the prediction type if noise_scheduler.config.prediction_type == "epsilon": target = noise elif noise_scheduler.config.prediction_type == "v_prediction": target = noise_scheduler.get_velocity(latents, noise, timesteps) else: raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}") if args.with_prior_preservation: # Chunk the noise and model_pred into two parts and compute the loss on each part separately. model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0) target, target_prior = torch.chunk(target, 2, dim=0) # Compute instance loss loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") # Compute prior loss prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean") # Add the prior loss to the instance loss. 
loss = loss + args.prior_loss_weight * prior_loss else: loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean") accelerator.backward(loss) if accelerator.sync_gradients: params_to_clip = ( itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters() ) accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) if args.report_to == "wandb": accelerator.print(progress_bar) global_step += 1 # if global_step % args.checkpointing_steps == 0: # if accelerator.is_main_process: # save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}") # accelerator.save_state(save_path) # logger.info(f"Saved state to {save_path}") logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]} progress_bar.set_postfix(**logs) accelerator.log(logs, step=global_step) if ( args.validation_prompt is not None and (step + num_update_steps_per_epoch * epoch) % args.validation_steps == 0 ): logger.info( f"Running validation... \n Generating {args.num_validation_images} images with prompt:" f" {args.validation_prompt}." ) # create pipeline pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, safety_checker=None, revision=args.revision, ) # set `keep_fp32_wrapper` to True because we do not want to remove # mixed precision hooks while we are still training pipeline.unet = accelerator.unwrap_model(unet, keep_fp32_wrapper=True) pipeline.text_encoder = accelerator.unwrap_model(text_encoder, keep_fp32_wrapper=True) pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) pipeline = pipeline.to(accelerator.device) pipeline.set_progress_bar_config(disable=True) # run inference if args.seed is not None: generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) else: generator = None images = [] for _ in range(args.num_validation_images): image = pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0] images.append(image) for tracker in accelerator.trackers: if tracker.name == "tensorboard": np_images = np.stack([np.asarray(img) for img in images]) tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC") if tracker.name == "wandb": import wandb tracker.log( { "validation": [ wandb.Image(image, caption=f"{i}: {args.validation_prompt}") for i, image in enumerate(images) ] } ) del pipeline torch.cuda.empty_cache() if global_step >= args.max_train_steps: break # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage if not args.no_tracemalloc: accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print( "CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used) ) accelerator.print( "CPU Peak Memory consumed during the train (max-begin): 
{}".format(tracemalloc.cpu_peaked) ) accelerator.print( "CPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) # Create the pipeline using the trained modules and save it. accelerator.wait_for_everyone() if accelerator.is_main_process: if args.use_lora: unwrapped_unet = accelerator.unwrap_model(unet) unwrapped_unet.save_pretrained( os.path.join(args.output_dir, "unet"), state_dict=accelerator.get_state_dict(unet) ) if args.train_text_encoder: unwrapped_text_encoder = accelerator.unwrap_model(text_encoder) unwrapped_text_encoder.save_pretrained( os.path.join(args.output_dir, "text_encoder"), state_dict=accelerator.get_state_dict(text_encoder), ) else: pipeline = DiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, unet=accelerator.unwrap_model(unet), text_encoder=accelerator.unwrap_model(text_encoder), revision=args.revision, ) pipeline.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True) accelerator.end_training() if __name__ == "__main__": args = parse_args() main(args)
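A quick way to consume the adapters saved above is to attach them back onto a fresh pipeline. A minimal inference sketch, assuming the output_dir layout written by this script ("unet" and, optionally, "text_encoder" subfolders); the base checkpoint, adapter path, and prompt are placeholders:

from diffusers import StableDiffusionPipeline
from peft import PeftModel

base_model = "runwayml/stable-diffusion-v1-5"  # assumption: the base used for training
output_dir = "path/to/output_dir"              # hypothetical: the --output_dir from above

pipe = StableDiffusionPipeline.from_pretrained(base_model)
# re-attach the LoRA adapters that save_pretrained wrote to the subfolders above
pipe.unet = PeftModel.from_pretrained(pipe.unet, f"{output_dir}/unet")
# only if --train_text_encoder was used during training
pipe.text_encoder = PeftModel.from_pretrained(pipe.text_encoder, f"{output_dir}/text_encoder")
pipe.to("cuda")

image = pipe("a photo of sks dog in a bucket", num_inference_steps=30).images[0]
image.save("sample.png")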
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/lora_dreambooth/colab_notebook.ipynb
%cd "peft-lora-sd-dreambooth" !pip install -r requirements.txt
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py
import argparse import os from typing import Dict import torch from diffusers import UNet2DConditionModel from safetensors.torch import save_file from transformers import CLIPTextModel from peft import PeftModel, get_peft_model_state_dict # Default kohya_ss LoRA replacement modules # https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L664 LORA_PREFIX_UNET = "lora_unet" LORA_PREFIX_TEXT_ENCODER = "lora_te" LORA_ADAPTER_NAME = "default" def get_module_kohya_state_dict( module: PeftModel, prefix: str, dtype: torch.dtype, adapter_name: str = LORA_ADAPTER_NAME ) -> Dict[str, torch.Tensor]: kohya_ss_state_dict = {} for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items(): kohya_key = peft_key.replace("base_model.model", prefix) kohya_key = kohya_key.replace("lora_A", "lora_down") kohya_key = kohya_key.replace("lora_B", "lora_up") kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) kohya_ss_state_dict[kohya_key] = weight.to(dtype) # Set alpha parameter if "lora_down" in kohya_key: alpha_key = f'{kohya_key.split(".")[0]}.alpha' kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype) return kohya_ss_state_dict if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--sd_checkpoint", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--sd_checkpoint_revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument("--peft_lora_path", default=None, type=str, required=True, help="Path to peft trained LoRA") parser.add_argument( "--dump_path", default=None, type=str, required=True, help="Path to the output safetensors file for use with webui.", ) parser.add_argument("--half", action="store_true", help="Save weights in half precision.") args = parser.parse_args() # Store kohya_ss state dict kohya_ss_state_dict = {} dtype = torch.float16 if args.half else torch.float32 # Load Text Encoder LoRA model text_encoder_peft_lora_path = os.path.join(args.peft_lora_path, "text_encoder") if os.path.exists(text_encoder_peft_lora_path): text_encoder = CLIPTextModel.from_pretrained( args.sd_checkpoint, subfolder="text_encoder", revision=args.sd_checkpoint_revision ) text_encoder = PeftModel.from_pretrained( text_encoder, text_encoder_peft_lora_path, adapter_name=LORA_ADAPTER_NAME ) kohya_ss_state_dict.update( get_module_kohya_state_dict(text_encoder, LORA_PREFIX_TEXT_ENCODER, dtype, LORA_ADAPTER_NAME) ) # Load UNet LoRA model unet_peft_lora_path = os.path.join(args.peft_lora_path, "unet") if os.path.exists(unet_peft_lora_path): unet = UNet2DConditionModel.from_pretrained( args.sd_checkpoint, subfolder="unet", revision=args.sd_checkpoint_revision ) unet = PeftModel.from_pretrained(unet, unet_peft_lora_path, adapter_name=LORA_ADAPTER_NAME) kohya_ss_state_dict.update(get_module_kohya_state_dict(unet, LORA_PREFIX_UNET, dtype, LORA_ADAPTER_NAME)) # Save state dict save_file( kohya_ss_state_dict, args.dump_path, )
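For orientation, here is what the renaming in get_module_kohya_state_dict does to a single PEFT key; the module path itself is made up for illustration:

# illustration only: mapping one hypothetical PEFT LoRA key to the kohya_ss scheme
peft_key = "base_model.model.down_blocks.0.attentions.0.proj_in.lora_A.weight"
kohya_key = peft_key.replace("base_model.model", "lora_unet")
kohya_key = kohya_key.replace("lora_A", "lora_down")
kohya_key = kohya_key.replace("lora_B", "lora_up")
# keep the last two dots (".lora_down.weight"); underscore the module path
kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2)
print(kohya_key)  # lora_unet_down_blocks_0_attentions_0_proj_in.lora_down.weight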
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb
import os os.environ["CUDA_VISIBLE_DEVICES"] = "0"from huggingface_hub import notebook_login import torch notebook_login()from peft import PeftModel from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig model_name = "decapoda-research/llama-7b-hf" tokenizer = LlamaTokenizer.from_pretrained(model_name) model = LlamaForCausalLM.from_pretrained(model_name, load_in_8bit=True, device_map="auto", use_auth_token=True)%%time model = PeftModel.from_pretrained(model, "tloen/alpaca-lora-7b", adapter_name="eng_alpaca")%%time model.load_adapter("22h/cabrita-lora-v0-1", adapter_name="portuguese_alpaca")modelmodel.to("cuda")import torch device = "cuda" def generate_prompt(instruction, input=None): if input: return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Input: {input} ### Response:""" else: return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response:""" def evaluate( instruction, input=None, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, max_new_tokens=256, **kwargs, ): prompt = generate_prompt(instruction, input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=3, **kwargs, ) with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens, ) s = generation_output.sequences[0] output = tokenizer.decode(s) return output.split("### Response:")[1].strip()%%time model.set_adapter("eng_alpaca")instruction = "Tell me about alpacas." print(evaluate(instruction))%%time model.set_adapter("portuguese_alpaca")instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa." print(evaluate(instruction))with model.disable_adapter(): instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa." print(evaluate(instruction))
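A small addition that helps when juggling several adapters: PeftModel keeps loaded adapters in a dict and exposes the active one. A minimal sketch, assuming the model object from the cells above:

# list every adapter currently loaded and the one generation will use
print(list(model.peft_config.keys()))  # e.g. ['eng_alpaca', 'portuguese_alpaca']
print(model.active_adapter)            # the adapter selected via set_adapter

# switch between them exactly as shown in the cells above
model.set_adapter("eng_alpaca")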
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_ds_zero3_offload.py
import gc import os import sys import threading import numpy as np import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import LoraConfig, TaskType, get_peft_model def levenshtein_distance(str1, str2): # TC: O(N^2) # SC: O(N^2) if str1 == str2: return 0 num_rows = len(str1) + 1 num_cols = len(str2) + 1 dp_matrix = np.empty((num_rows, num_cols)) dp_matrix[0, :] = range(num_cols) dp_matrix[:, 0] = range(num_rows) for i in range(1, num_rows): for j in range(1, num_cols): if str1[i - 1] == str2[j - 1]: dp_matrix[i, j] = dp_matrix[i - 1, j - 1] else: dp_matrix[i, j] = min(dp_matrix[i - 1, j - 1], dp_matrix[i - 1, j], dp_matrix[i, j - 1]) + 1 return dp_matrix[num_rows - 1, num_cols - 1] def get_closest_label(eval_pred, classes): min_id = sys.maxsize min_edit_distance = sys.maxsize for i, class_label in enumerate(classes): edit_distance = levenshtein_distance(eval_pred.strip(), class_label) if edit_distance < min_edit_distance: min_id = i min_edit_distance = edit_distance return classes[min_id] # Converting Bytes to Megabytes def b2mb(x): return int(x / 2**20) # This context manager is used to track the peak memory usage of the process class TorchTracemalloc: def __enter__(self): gc.collect() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero self.begin = torch.cuda.memory_allocated() self.process = psutil.Process() self.cpu_begin = self.cpu_mem_used() self.peak_monitoring = True peak_monitor_thread = threading.Thread(target=self.peak_monitor_func) peak_monitor_thread.daemon = True peak_monitor_thread.start() return self def cpu_mem_used(self): """get resident set size memory for the current process""" return self.process.memory_info().rss def peak_monitor_func(self): self.cpu_peak = -1 while True: self.cpu_peak = max(self.cpu_mem_used(), self.cpu_peak) # can't sleep or will not catch the peak right (this comment is here on purpose) # time.sleep(0.001) # 1msec if not self.peak_monitoring: break def __exit__(self, *exc): self.peak_monitoring = False gc.collect() torch.cuda.empty_cache() self.end = torch.cuda.memory_allocated() self.peak = torch.cuda.max_memory_allocated() self.used = b2mb(self.end - self.begin) self.peaked = b2mb(self.peak - self.begin) self.cpu_end = self.cpu_mem_used() self.cpu_used = b2mb(self.cpu_end - self.cpu_begin) self.cpu_peaked = b2mb(self.cpu_peak - self.cpu_begin) # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}") def main(): accelerator = Accelerator() # model_name_or_path = "bigscience/T0_3B" model_name_or_path = "facebook/bart-large" dataset_name = "twitter_complaints" peft_config = LoraConfig( task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ) text_column = "Tweet text" label_column = "text_label" lr = 3e-3 num_epochs = 5 batch_size = 8 seed = 42 do_test = False set_seed(seed) dataset = load_dataset("ought/raft", dataset_name) classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names] dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["Label"]]}, batched=True, num_proc=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) def preprocess_function(examples): inputs = examples[text_column] targets = 
examples[label_column] model_inputs = tokenizer(inputs, truncation=True) labels = tokenizer( targets, max_length=target_max_length, padding="max_length", truncation=True, return_tensors="pt" ) labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs with accelerator.main_process_first(): processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=True, desc="Running tokenizer on dataset", ) accelerator.wait_for_everyone() train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["train"] test_dataset = processed_datasets["test"] def collate_fn(examples): return tokenizer.pad(examples, padding="longest", return_tensors="pt") train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True) test_dataloader = DataLoader(test_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True) # creating model model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=lr) # lr scheduler lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler = accelerator.prepare( model, train_dataloader, eval_dataloader, test_dataloader, optimizer, lr_scheduler ) accelerator.print(model) is_ds_zero_3 = False if getattr(accelerator.state, "deepspeed_plugin", None): is_ds_zero_3 = accelerator.state.deepspeed_plugin.zero_stage == 3 for epoch in range(num_epochs): with TorchTracemalloc() as tracemalloc: model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print("CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used)) accelerator.print("CPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.cpu_peaked)) accelerator.print( "CPU Total Peak Memory consumed during the train (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=}") model.eval() eval_preds = [] with TorchTracemalloc() as tracemalloc: for _, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v for k, v in batch.items() if k 
!= "labels"} with torch.no_grad(): outputs = accelerator.unwrap_model(model).generate( **batch, synced_gpus=is_ds_zero_3 ) # synced_gpus=True for DS-stage 3 outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id) preds = accelerator.gather_for_metrics(outputs).detach().cpu().numpy() eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage accelerator.print("GPU Memory before entering the eval : {}".format(b2mb(tracemalloc.begin))) accelerator.print("GPU Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.used)) accelerator.print("GPU Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.peaked)) accelerator.print( "GPU Total Peak Memory consumed during the eval (max): {}".format( tracemalloc.peaked + b2mb(tracemalloc.begin) ) ) accelerator.print("CPU Memory before entering the eval : {}".format(b2mb(tracemalloc.cpu_begin))) accelerator.print("CPU Memory consumed at the end of the eval (end-begin): {}".format(tracemalloc.cpu_used)) accelerator.print("CPU Peak Memory consumed during the eval (max-begin): {}".format(tracemalloc.cpu_peaked)) accelerator.print( "CPU Total Peak Memory consumed during the eval (max): {}".format( tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin) ) ) correct = 0 total = 0 assert len(eval_preds) == len( dataset["train"][label_column] ), f"{len(eval_preds)} != {len(dataset['train'][label_column])}" for pred, true in zip(eval_preds, dataset["train"][label_column]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 accelerator.print(f"{accuracy=}") accelerator.print(f"{eval_preds[:10]=}") accelerator.print(f"{dataset['train'][label_column][:10]=}") if do_test: model.eval() test_preds = [] for _, batch in enumerate(tqdm(test_dataloader)): batch = {k: v for k, v in batch.items() if k != "labels"} with torch.no_grad(): outputs = accelerator.unwrap_model(model).generate( **batch, synced_gpus=is_ds_zero_3 ) # synced_gpus=True for DS-stage 3 outputs = accelerator.pad_across_processes(outputs, dim=1, pad_index=tokenizer.pad_token_id) preds = accelerator.gather(outputs).detach().cpu().numpy() test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True)) test_preds_cleaned = [] for _, pred in enumerate(test_preds): test_preds_cleaned.append(get_closest_label(pred, classes)) test_df = dataset["test"].to_pandas() assert len(test_preds_cleaned) == len(test_df), f"{len(test_preds_cleaned)} != {len(test_df)}" test_df[label_column] = test_preds_cleaned test_df["text_labels_orig"] = test_preds accelerator.print(test_df[[text_column, label_column]].sample(20)) pred_df = test_df[["ID", label_column]] pred_df.columns = ["ID", "Label"] os.makedirs(f"data/{dataset_name}", exist_ok=True) pred_df.to_csv(f"data/{dataset_name}/predictions.csv", index=False) accelerator.wait_for_everyone() # Option1: Pushing the model to Hugging Face Hub # model.push_to_hub( # f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"), # token = "hf_..." # ) # token (`bool` or `str`, *optional*): # `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated # when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url` # is not specified. 
# Or you can get your token from https://huggingface.co/settings/token # Option2: Saving the model locally peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace( "/", "_" ) model.save_pretrained(peft_model_id) accelerator.wait_for_everyone() if __name__ == "__main__": main()
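After training, the locally saved adapter (Option 2 above) can be reloaded without DeepSpeed for plain inference. A minimal sketch, assuming the peft_model_id directory written by save_pretrained above:

from transformers import AutoModelForSeq2SeqLM
from peft import PeftConfig, PeftModel

# the directory name produced by the f-string above for these defaults
peft_model_id = "twitter_complaints_facebook_bart-large_LORA_SEQ_2_SEQ_LM"
config = PeftConfig.from_pretrained(peft_model_id)
base = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(base, peft_model_id)
model.eval()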
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/accelerate_ds_zero3_cpu_offload_config.yaml
compute_environment: LOCAL_MACHINE deepspeed_config: gradient_accumulation_steps: 1 gradient_clipping: 1.0 offload_optimizer_device: none offload_param_device: none zero3_init_flag: true zero3_save_16bit_model: true zero_stage: 3 distributed_type: DEEPSPEED downcast_bf16: 'no' dynamo_backend: 'NO' fsdp_config: {} machine_rank: 0 main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static same_network: true use_cpu: false
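To launch the ZeRO-3 offload training script above with this configuration, pass it to accelerate (file names as they appear in this examples folder; adjust paths if you keep them elsewhere):

accelerate launch --config_file accelerate_ds_zero3_cpu_offload_config.yaml peft_lora_seq2seq_accelerate_ds_zero3_offload.py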
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_prompt_tuning_seq2seq.ipynb
import os import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup from peft import get_peft_model, PromptTuningConfig, TaskType, PromptTuningInit from torch.utils.data import DataLoader from tqdm import tqdm from datasets import load_dataset os.environ["TOKENIZERS_PARALLELISM"] = "false" device = "cuda" model_name_or_path = "t5-large" tokenizer_name_or_path = "t5-large" checkpoint_name = "financial_sentiment_analysis_prompt_tuning_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 128 lr = 1 num_epochs = 5 batch_size = 8# creating model peft_config = PromptTuningConfig( task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=PromptTuningInit.TEXT, num_virtual_tokens=20, prompt_tuning_init_text="What is the sentiment of this article?\n", inference_mode=False, tokenizer_name_or_path=model_name_or_path, ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model# loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) dataset["train"][0]# data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes]) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer( targets, max_length=target_max_length, padding="max_length", truncation=True, return_tensors="pt" ) labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)# optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), )# training and evaluation model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( 
tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")# print accuracy correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["validation"]["text_label"]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=} % on the evaluation dataset") print(f"{eval_preds[:10]=}") print(f"{dataset['validation']['text_label'][:10]=}")# saving model peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id)ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckptfrom peft import PeftModel, PeftConfig peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id)model.eval() i = 107 input_ids = tokenizer(dataset["validation"][text_column][i], return_tensors="pt").input_ids print(dataset["validation"][text_column][i]) print(input_ids) with torch.no_grad(): outputs = model.generate(input_ids=input_ids, max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
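print_trainable_parameters above already summarizes this, but the same numbers are easy to compute by hand on any PEFT model, which makes the tiny footprint of prompt tuning concrete:

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"trainable: {trainable} || all: {total} || trainable%: {100 * trainable / total:.4f}")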
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/requirements.txt
transformers accelerate evaluate deepspeed tqdm datasets
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_prefix_tuning_seq2seq.ipynb
from transformers import AutoModelForSeq2SeqLM from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, PrefixTuningConfig, TaskType import torch from datasets import load_dataset import os os.environ["TOKENIZERS_PARALLELISM"] = "false" os.environ["CUDA_VISIBLE_DEVICES"] = "3" from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset device = "cuda" model_name_or_path = "t5-large" tokenizer_name_or_path = "t5-large" checkpoint_name = "financial_sentiment_analysis_prefix_tuning_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 128 lr = 1e-2 num_epochs = 5 batch_size = 8# creating model peft_config = PrefixTuningConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, num_virtual_tokens=20) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model# loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) dataset["train"][0]# data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)# optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), )# training and evaluation model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) 
eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")# print accuracy correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["validation"]["text_label"]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=} % on the evaluation dataset") print(f"{eval_preds[:10]=}") print(f"{dataset['validation']['text_label'][:10]=}")# saving model peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id)ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckptfrom peft import PeftModel, PeftConfig peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id)model.eval() i = 107 inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt") print(dataset["validation"][text_column][i]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/multitask_prompt_tuning.ipynb
from datasets import load_dataset from transformers import set_seed, AutoModelForSeq2SeqLM, AutoTokenizer from peft import get_peft_model, MultitaskPromptTuningConfig, TaskType, MultitaskPromptTuningInit set_seed(42) model_name = "google/flan-t5-base" peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=2, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.TEXT, num_virtual_tokens=50, num_transformer_submodules=1, prompt_tuning_init_text="classify the following into either positive or negative, or entailment, neutral or contradiction:", ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.cuda() def send_to_device(batch): for i in batch: batch[i] = batch[i].cuda() return batchdef get_sst2(split: str): examples = load_dataset("sst2")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["sentence"].strip() + "</s>" result_examples[-1]["output"] = ( f"positive{tokenizer.eos_token}" if example["label"] == 1 else f"negative{tokenizer.eos_token}" ) result_examples[-1]["task_id"] = 0 return result_examples def get_mnli(split: str): examples = load_dataset("multi_nli")[split] result_examples = [] for example in examples: result_examples.append({}) result_examples[-1]["input"] = example["premise"].strip() + " " + example["hypothesis"].strip() + "</s>" if example["label"] == 0: result_examples[-1]["output"] = f"entailment{tokenizer.eos_token}" elif example["label"] == 1: result_examples[-1]["output"] = f"neutral{tokenizer.eos_token}" else: result_examples[-1]["output"] = f"contradiction{tokenizer.eos_token}" result_examples[-1]["task_id"] = 1 return result_examplesfrom typing import Tuple from torch.utils.data import Dataset, DataLoader import torch class MyDataset(Dataset): def __init__(self, split: str, mode: str = "source") -> None: super().__init__() if split == "train": if mode == "source": self.examples = get_sst2(split) + get_mnli(split) elif mode == "target": self.examples = get_sst2(split) if split == "val": self.examples = get_sst2("validation") if split == "test": self.examples = get_sst2("validation") def __getitem__(self, index) -> dict: return self.examples[index] def __len__(self) -> int: return len(self.examples) def collate_fn(batch: dict) -> Tuple[torch.Tensor, torch.Tensor]: input = [i["input"] for i in batch] input = tokenizer(input, add_special_tokens=False, return_tensors="pt", padding=True) output = [i["output"] for i in batch] output = tokenizer(output, add_special_tokens=False, return_tensors="pt", padding=True).input_ids output[output == tokenizer.pad_token_id] = -100 task_ids = [i["task_id"] for i in batch] task_ids = torch.tensor(task_ids) return { "input_ids": input.input_ids, "attention_mask": input.attention_mask, "labels": output, "task_ids": task_ids, } train = DataLoader(MyDataset("train"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test"), shuffle=False, batch_size=8, collate_fn=collate_fn)from torch.optim.adamw import AdamW from transformers import get_cosine_schedule_with_warmup from tqdm import tqdm from sklearn.metrics import f1_scorePOSITIVE_TOKEN_ID = tokenizer(" positive",
add_special_tokens=False)["input_ids"][0] NEGATIVE_TOKEN_ID = tokenizer(" negative", add_special_tokens=False)["input_ids"][0] def classify(batch): batch = send_to_device(batch) # we pass labels because PEFT does not yet support generate() for this setup, # so we classify by comparing the logits of the first decoded position instead scores = model(**batch).logits preds = [] for i in range(scores.shape[0]): if scores[i, 0, POSITIVE_TOKEN_ID] > scores[i, 0, NEGATIVE_TOKEN_ID]: preds.append(POSITIVE_TOKEN_ID) else: preds.append(NEGATIVE_TOKEN_ID) return preds @torch.inference_mode() def evaluate(model, data): loss = 0 preds = [] golds = [] for batch in tqdm(data): batch = send_to_device(batch) loss += model(**batch).loss golds.extend(batch["labels"][:, 0].tolist()) preds.extend(classify(batch)) return loss / len(data), f1_score(golds, preds, pos_label=POSITIVE_TOKEN_ID) optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before source training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_source/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss.item())train = DataLoader(MyDataset("train", "target"), shuffle=True, batch_size=8, collate_fn=collate_fn) val = DataLoader(MyDataset("val", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn) test = DataLoader(MyDataset("test", "target"), shuffle=False, batch_size=8, collate_fn=collate_fn)peft_config = MultitaskPromptTuningConfig( tokenizer_name_or_path=model_name, num_tasks=1, task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK, prompt_tuning_init_state_dict_path="checkpoints_source/50000/adapter_model.bin", num_virtual_tokens=50, num_transformer_submodules=1, ) tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model = get_peft_model(model, peft_config) model = model.cuda()optimizer = AdamW(model.parameters(), lr=1e-4) scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train)) n = 1000 step = 0 train_ = tqdm(train) val_loss, f1 = evaluate(model, val) print( f""" before target training val loss = {val_loss} f1 = {f1}""" ) for batch in train_: if step % n == 0: val_loss, f1 = evaluate(model, val) print( f""" step = {step} val loss = {val_loss} f1 = {f1}""" ) model.save_pretrained(f"checkpoints_target/{step}") step += 1 batch = send_to_device(batch) loss = model(**batch).loss loss.backward() optimizer.step() scheduler.step() train_.set_postfix(train_loss=loss.item())# load last checkpoint for now from peft import set_peft_model_state_dict sd_6000 = torch.load("checkpoints_target/6000/adapter_model.bin") set_peft_model_state_dict(model, sd_6000) # evaluate val val_loss, f1 = evaluate(model, val) print( f""" final val loss = {val_loss} f1 = {f1}""" ) # evaluate test test_loss, f1 = evaluate(model, test) print( f""" final test loss = {test_loss} f1 = {f1}""" )
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_adalora_seq2seq.py
import os import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup from peft import AdaLoraConfig, PeftConfig, PeftModel, TaskType, get_peft_model os.environ["TOKENIZERS_PARALLELISM"] = "false" device = "cuda" model_name_or_path = "facebook/bart-base" tokenizer_name_or_path = "facebook/bart-base" checkpoint_name = "financial_sentiment_analysis_lora_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 128 lr = 1e-3 num_epochs = 8 batch_size = 8 # creating model peft_config = AdaLoraConfig( init_r=12, target_r=8, beta1=0.85, beta2=0.85, tinit=200, tfinal=1000, deltaT=10, lora_alpha=32, lora_dropout=0.1, task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() # loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) # data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) # optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) model.base_model.peft_config.total_step = len(train_dataloader) * num_epochs # training and evaluation model = model.to(device) global_step = 0 for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() # Update the importance of low-rank matrices # and allocate the budget accordingly. 
model.base_model.update_and_allocate(global_step) optimizer.zero_grad() global_step += 1 model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") # print accuracy correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["validation"]["text_label"]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=} % on the evaluation dataset") print(f"{eval_preds[:10]=}") print(f"{dataset['validation']['text_label'][:10]=}") # saving model peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id) ckpt = f"{peft_model_id}/adapter_model.bin" # get_ipython().system('du -h $ckpt') peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id) model.eval() i = 13 inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt") print(dataset["validation"][text_column][i]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
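A rough guide to the schedule knobs above (hedged; based on the AdaLoRA budget schedule rather than on code in this script): ranks stay at init_r for the first tinit steps, are pruned toward target_r over the middle of training, and the final tfinal steps run with the rank pattern frozen, which is why total_step must be set before the loop. A small sanity check under those assumptions:

total_step = len(train_dataloader) * num_epochs  # as assigned before the training loop
tinit, tfinal = 200, 1000                        # from the AdaLoraConfig above
print(f"rank allocation is adapted between step {tinit} and step {total_step - tfinal}")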
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_prompt_tuning_seq2seq_with_generate.ipynb
import os import torch from transformers import ( AutoTokenizer, default_data_collator, AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer, GenerationConfig, ) from peft import get_peft_model, PromptTuningInit, PromptTuningConfig, TaskType from datasets import load_dataset os.environ["CUDA_VISIBLE_DEVICES"] = "0" os.environ["TOKENIZERS_PARALLELISM"] = "false" device = "cuda" model_name_or_path = "t5-large" tokenizer_name_or_path = "t5-large" checkpoint_name = "financial_sentiment_analysis_prefix_tuning_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 8 lr = 1e0 num_epochs = 5 batch_size = 8# creating model peft_config = PromptTuningConfig( task_type=TaskType.SEQ_2_SEQ_LM, prompt_tuning_init=PromptTuningInit.TEXT, num_virtual_tokens=20, prompt_tuning_init_text="What is the sentiment of this article?\n", inference_mode=False, tokenizer_name_or_path=model_name_or_path, ) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model# loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) dataset["train"][0]# data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"].shuffle() eval_dataset = processed_datasets["validation"]# training and evaluation def compute_metrics(eval_preds): preds, labels = eval_preds preds = tokenizer.batch_decode(preds, skip_special_tokens=True) labels = tokenizer.batch_decode(labels, skip_special_tokens=True) correct = 0 total = 0 for pred, true in zip(preds, labels): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total return {"accuracy": accuracy} training_args = Seq2SeqTrainingArguments( "out", per_device_train_batch_size=batch_size, learning_rate=lr, num_train_epochs=num_epochs, evaluation_strategy="epoch", logging_strategy="epoch", save_strategy="no", report_to=[], predict_with_generate=True, generation_config=GenerationConfig(max_length=max_length), ) trainer = Seq2SeqTrainer( model=model, tokenizer=tokenizer, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=default_data_collator, compute_metrics=compute_metrics, ) trainer.train()# saving model peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id)ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckptfrom peft import PeftModel, PeftConfig peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id)model.eval() i = 107 inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt") print(dataset["validation"][text_column][i]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_lora_seq2seq.ipynb
from transformers import AutoModelForSeq2SeqLM
from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, LoraConfig, TaskType
import torch
from datasets import load_dataset
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

from transformers import AutoTokenizer
from torch.utils.data import DataLoader
from transformers import default_data_collator, get_linear_schedule_with_warmup
from tqdm import tqdm

device = "cuda"
model_name_or_path = "bigscience/mt0-large"
tokenizer_name_or_path = "bigscience/mt0-large"

checkpoint_name = "financial_sentiment_analysis_lora_v1.pt"
text_column = "sentence"
label_column = "text_label"
max_length = 128
lr = 1e-3
num_epochs = 3
batch_size = 8

# creating model
peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)

model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

# loading dataset
dataset = load_dataset("financial_phrasebank", "sentences_allagree")
dataset = dataset["train"].train_test_split(test_size=0.1)
dataset["validation"] = dataset["test"]
del dataset["test"]

classes = dataset["train"].features["label"].names
dataset = dataset.map(
    lambda x: {"text_label": [classes[label] for label in x["label"]]},
    batched=True,
    num_proc=1,
)

dataset["train"][0]

# data preprocessing
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)


def preprocess_function(examples):
    inputs = examples[text_column]
    targets = examples[label_column]
    model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
    labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt")
    labels = labels["input_ids"]
    labels[labels == tokenizer.pad_token_id] = -100
    model_inputs["labels"] = labels
    return model_inputs


processed_datasets = dataset.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)

train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]

train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)

# optimizer and lr scheduler
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=(len(train_dataloader) * num_epochs),
)

# training and evaluation
model = model.to(device)

for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")

# print accuracy
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")

# saving model
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
model.save_pretrained(peft_model_id)

ckpt = f"{peft_model_id}/adapter_model.bin"
!du -h $ckpt

from peft import PeftModel, PeftConfig

peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)

model.eval()
i = 13
inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt")
print(dataset["validation"][text_column][i])
print(inputs)

with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(outputs)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
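# --- Added note (not part of the original notebook) ---
# For adapter-free deployment, the LoRA update can be folded back into the base
# weights. A minimal sketch, assuming the `peft_model_id` directory saved above:
# `merge_and_unload()` is the PEFT helper for LoRA models that adds the low-rank
# update (scaled by lora_alpha / r) into each wrapped linear layer and strips the
# adapter wrappers, leaving a plain transformers model.
from transformers import AutoModelForSeq2SeqLM
from peft import PeftModel

base_model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
merged = PeftModel.from_pretrained(base_model, peft_model_id).merge_and_unload()
merged.save_pretrained("mt0-large-financial-sentiment-merged")  # plain checkpoint, no PEFT needed at load time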
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_big_model_inference.ipynb
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from peft import PeftModel, PeftConfig
import torch
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm

dataset_name = "twitter_complaints"
text_column = "Tweet text"
label_column = "text_label"
batch_size = 8

peft_model_id = "smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM"
config = PeftConfig.from_pretrained(peft_model_id)

# distribute the 3B base model across the available devices within the given memory budget
max_memory = {0: "6GIB", 1: "0GIB", 2: "0GIB", 3: "0GIB", 4: "0GIB", "cpu": "30GB"}
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, device_map="auto", max_memory=max_memory)
model = PeftModel.from_pretrained(model, peft_model_id, device_map="auto", max_memory=max_memory)

dataset = load_dataset("ought/raft", dataset_name)

classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
print(classes)
dataset = dataset.map(
    lambda x: {"text_label": [classes[label] for label in x["Label"]]},
    batched=True,
    num_proc=1,
)
print(dataset)
dataset["train"][0]

tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])


def preprocess_function(examples):
    inputs = examples[text_column]
    targets = examples[label_column]
    model_inputs = tokenizer(inputs, truncation=True)
    labels = tokenizer(
        targets, max_length=target_max_length, padding="max_length", truncation=True, return_tensors="pt"
    )
    labels = labels["input_ids"]
    labels[labels == tokenizer.pad_token_id] = -100
    model_inputs["labels"] = labels
    return model_inputs


processed_datasets = dataset.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=True,
    desc="Running tokenizer on dataset",
)

# the RAFT test split is unlabeled, so accuracy is measured against the labeled train split
train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["train"]
test_dataset = processed_datasets["test"]


def collate_fn(examples):
    return tokenizer.pad(examples, padding="longest", return_tensors="pt")


train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True)
test_dataloader = DataLoader(test_dataset, collate_fn=collate_fn, batch_size=batch_size, pin_memory=True)

model.eval()
i = 15
inputs = tokenizer(f'{text_column} : {dataset["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
print(dataset["test"][i]["Tweet text"])
print(inputs)

with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=10)
    print(outputs)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))

model.eval()
eval_preds = []
for _, batch in enumerate(tqdm(eval_dataloader)):
    batch = {k: v.to("cuda") for k, v in batch.items() if k != "labels"}
    with torch.no_grad():
        outputs = model.generate(**batch, max_new_tokens=10)
    preds = outputs.detach().cpu().numpy()
    eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))

correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["train"][label_column]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=}")
print(f"{eval_preds[:10]=}")
print(f"{dataset['train'][label_column][:10]=}")

model.eval()
test_preds = []
for _, batch in enumerate(tqdm(test_dataloader)):
    batch = {k: v for k, v in batch.items() if k != "labels"}
    with torch.no_grad():
        outputs = model.generate(**batch, max_new_tokens=10)
    preds = outputs.detach().cpu().numpy()
    test_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))
    if len(test_preds) > 100:
        break
test_preds
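# --- Added note (not part of the original notebook) ---
# To see how `device_map="auto"` honors the `max_memory` budget above, the
# placement can be previewed without allocating any real weights, using
# Accelerate's meta-device utilities. A sketch:
from accelerate import init_empty_weights, infer_auto_device_map
from transformers import AutoConfig, AutoModelForSeq2SeqLM

model_config = AutoConfig.from_pretrained(config.base_model_name_or_path)
with init_empty_weights():
    empty_model = AutoModelForSeq2SeqLM.from_config(model_config)  # weights stay on the meta device
device_map = infer_auto_device_map(empty_model, max_memory=max_memory)
print(device_map)  # maps module names to a GPU index, "cpu", or "disk"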
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_lora_seq2seq_accelerate_fsdp.py
import os

import torch
from accelerate import Accelerator
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup

from peft import LoraConfig, TaskType, get_peft_model
from peft.utils.other import fsdp_auto_wrap_policy


def main():
    accelerator = Accelerator()
    model_name_or_path = "t5-base"
    batch_size = 8
    text_column = "sentence"
    label_column = "label"
    max_length = 64
    lr = 1e-3
    num_epochs = 1
    base_path = "temp/data/FinancialPhraseBank-v1.0"

    peft_config = LoraConfig(
        task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
    model = get_peft_model(model, peft_config)
    accelerator.print(model.print_trainable_parameters())

    dataset = load_dataset(
        "json",
        data_files={
            "train": os.path.join(base_path, "financial_phrase_bank_train.jsonl"),
            "validation": os.path.join(base_path, "financial_phrase_bank_val.jsonl"),
        },
    )

    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

    def preprocess_function(examples):
        inputs = examples[text_column]
        targets = examples[label_column]
        model_inputs = tokenizer(
            inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt"
        )
        labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt")
        labels = labels["input_ids"]
        labels[labels == tokenizer.pad_token_id] = -100
        model_inputs["labels"] = labels
        return model_inputs

    with accelerator.main_process_first():
        processed_datasets = dataset.map(
            preprocess_function,
            batched=True,
            num_proc=1,
            remove_columns=dataset["train"].column_names,
            load_from_cache_file=False,
            desc="Running tokenizer on dataset",
        )

    train_dataset = processed_datasets["train"]
    eval_dataset = processed_datasets["validation"]

    train_dataloader = DataLoader(
        train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
    )
    eval_dataloader = DataLoader(
        eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
    )

    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)

    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=0,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    if getattr(accelerator.state, "fsdp_plugin", None) is not None:
        accelerator.state.fsdp_plugin.auto_wrap_policy = fsdp_auto_wrap_policy(model)

    model, train_dataloader, eval_dataloader, optimizer, lr_scheduler = accelerator.prepare(
        model, train_dataloader, eval_dataloader, optimizer, lr_scheduler
    )
    accelerator.print(model)

    for epoch in range(num_epochs):
        model.train()
        total_loss = 0
        for step, batch in enumerate(tqdm(train_dataloader)):
            outputs = model(**batch)
            loss = outputs.loss
            total_loss += loss.detach().float()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

        model.eval()
        eval_loss = 0
        eval_preds = []
        for step, batch in enumerate(tqdm(eval_dataloader)):
            with torch.no_grad():
                outputs = model(**batch)
            loss = outputs.loss
            eval_loss += loss.detach().float()
            preds = accelerator.gather_for_metrics(torch.argmax(outputs.logits, -1)).detach().cpu().numpy()
            eval_preds.extend(tokenizer.batch_decode(preds, skip_special_tokens=True))

        eval_epoch_loss = eval_loss / len(eval_dataloader)
        eval_ppl = torch.exp(eval_epoch_loss)
        train_epoch_loss = total_loss / len(train_dataloader)
        train_ppl = torch.exp(train_epoch_loss)
        accelerator.print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")

    correct = 0
    total = 0
    for pred, true in zip(eval_preds, dataset["validation"][label_column]):
        if pred.strip() == true.strip():
            correct += 1
        total += 1
    accuracy = correct / total * 100
    accelerator.print(f"{accuracy=}")
    accelerator.print(f"{eval_preds[:10]=}")
    accelerator.print(f"{dataset['validation'][label_column][:10]=}")

    accelerator.wait_for_everyone()
    # Option 1: Pushing the model to the Hugging Face Hub
    # model.push_to_hub(
    #     f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
    #     token="hf_...",
    # )
    # token (`bool` or `str`, *optional*):
    #     `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use
    #     the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). Will default
    #     to `True` if `repo_url` is not specified.
    #     Or you can get your token from https://huggingface.co/settings/token
    # Option 2: Saving the model locally
    peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_")
    model.save_pretrained(peft_model_id)
    accelerator.wait_for_everyone()


if __name__ == "__main__":
    main()
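# --- Added note (not part of the original script) ---
# A quick sanity check that can be run right after `get_peft_model` and before
# `accelerator.prepare`: with LoRA, only the adapter tensors should still require
# gradients. This mixed frozen/trainable split is exactly why the script installs
# the PEFT-aware FSDP auto-wrap policy above. The helper below is hypothetical,
# not part of the script.
def assert_only_lora_trainable(model):
    trainable = [name for name, param in model.named_parameters() if param.requires_grad]
    # every tensor that still requires grad should belong to a LoRA module
    assert trainable and all("lora_" in name for name in trainable), trainable
    print(f"{len(trainable)} trainable tensors, e.g. {trainable[:2]}")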
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/conditional_generation/peft_ia3_seq2seq.ipynb
from transformers import AutoModelForSeq2SeqLM
from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, IA3Config, TaskType
import torch
from datasets import load_dataset
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"

from transformers import AutoTokenizer
from torch.utils.data import DataLoader
from transformers import default_data_collator, get_linear_schedule_with_warmup
from tqdm import tqdm

device = "cuda"
model_name_or_path = "bigscience/mt0-large"
tokenizer_name_or_path = "bigscience/mt0-large"

checkpoint_name = "financial_sentiment_analysis_ia3_v1.pt"
text_column = "sentence"
label_column = "text_label"
max_length = 128
lr = 8e-3
num_epochs = 3
batch_size = 8

# creating model
peft_config = IA3Config(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, feedforward_modules=[])

model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
model

model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
model

# loading dataset
dataset = load_dataset("financial_phrasebank", "sentences_allagree")
dataset = dataset["train"].train_test_split(test_size=0.1)
dataset["validation"] = dataset["test"]
del dataset["test"]

classes = dataset["train"].features["label"].names
dataset = dataset.map(
    lambda x: {"text_label": [classes[label] for label in x["label"]]},
    batched=True,
    num_proc=1,
)

dataset["train"][0]

# data preprocessing
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)


def preprocess_function(examples):
    inputs = examples[text_column]
    targets = examples[label_column]
    model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
    labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt")
    labels = labels["input_ids"]
    labels[labels == tokenizer.pad_token_id] = -100
    model_inputs["labels"] = labels
    return model_inputs


processed_datasets = dataset.map(
    preprocess_function,
    batched=True,
    num_proc=1,
    remove_columns=dataset["train"].column_names,
    load_from_cache_file=False,
    desc="Running tokenizer on dataset",
)

train_dataset = processed_datasets["train"]
eval_dataset = processed_datasets["validation"]

train_dataloader = DataLoader(
    train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
)
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)

# optimizer and lr scheduler
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0,
    num_training_steps=(len(train_dataloader) * num_epochs),
)

# training and evaluation
model = model.to(device)

for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")

# print accuracy
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")

# saving model
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"
model.save_pretrained(peft_model_id)

ckpt = f"{peft_model_id}/adapter_model.bin"
!du -h $ckpt

from peft import PeftModel, PeftConfig

peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)

model.eval()
i = 13
inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt")
print(dataset["validation"][text_column][i])
print(inputs)

with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(outputs)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
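# --- Added note (not part of the original notebook) ---
# A conceptual sketch of what (IA)^3 trains, independent of PEFT internals: a
# learned vector rescales a frozen layer's output element-wise, so the only new
# parameters are one vector per adapted projection. Initializing the vector to
# ones makes the adapted model start out identical to the base model. Names and
# shapes below are illustrative only.
import torch

x = torch.randn(4, 16)                        # a batch of input activations
W = torch.randn(32, 16, requires_grad=False)  # frozen pretrained weight
l = torch.nn.Parameter(torch.ones(32))        # the (IA)^3 scaling vector, the only trainable tensor
h = (x @ W.T) * l                             # element-wise rescaled output; gradients flow only to l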
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/token_classification/peft_lora_token_cls.ipynb
# ! rm -r unilm
# ! pip install unilm

# ! wget https://guillaumejaume.github.io/FUNSD/dataset.zip
# ! unzip dataset.zip && mv dataset data && rm -rf dataset.zip __MACOSX

from PIL import Image, ImageDraw, ImageFont
import os

base_path = "/home/sourab/temp/data/dataset"

image = Image.open(os.path.join(base_path, "training_data/images/0000971160.png"))
image = image.convert("RGB")
image

import json

with open(os.path.join(base_path, "training_data/annotations/0000971160.json")) as f:
    data = json.load(f)

for annotation in data["form"]:
    print(annotation)

draw = ImageDraw.Draw(image, "RGBA")
font = ImageFont.load_default()

label2color = {"question": "blue", "answer": "green", "header": "orange", "other": "violet"}

for annotation in data["form"]:
    label = annotation["label"]
    general_box = annotation["box"]
    draw.rectangle(general_box, outline=label2color[label], width=2)
    draw.text((general_box[0] + 10, general_box[1] - 10), label, fill=label2color[label], font=font)
    words = annotation["words"]
    for word in words:
        box = word["box"]
        draw.rectangle(box, outline=label2color[label], width=1)

image

# ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/training_data/annotations \
#     --data_split train \
#     --output_dir data \
#     --model_name_or_path microsoft/layoutlm-base-uncased \
#     --max_len 510
# ! python unilm/layoutlm/examples/seq_labeling/preprocess.py --data_dir data/dataset/testing_data/annotations \
#     --data_split test \
#     --output_dir data \
#     --model_name_or_path microsoft/layoutlm-base-uncased \
#     --max_len 510

# ! cat data/train.txt | cut -d$'\t' -f 2 | grep -v "^$" | sort | uniq > data/labels.txt

from torch.nn import CrossEntropyLoss


def get_labels(path):
    with open(path, "r") as f:
        labels = f.read().splitlines()
    if "O" not in labels:
        labels = ["O"] + labels
    return labels


labels = get_labels("data/labels.txt")
num_labels = len(labels)
label_map = {i: label for i, label in enumerate(labels)}
# Use the cross-entropy ignore index as the padding label id so that only real label ids contribute to the loss
pad_token_label_id = CrossEntropyLoss().ignore_index

print(labels)

import logging

import torch
from torch.utils.data import Dataset

logger = logging.getLogger(__name__)


class FunsdDataset(Dataset):
    def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):
        if args.local_rank not in [-1, 0] and mode == "train":
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache
            torch.distributed.barrier()

        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            args.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, args.model_name_or_path.split("/"))).pop(),
                str(args.max_seq_length),
            ),
        )
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            logger.info("Loading features from cached file %s", cached_features_file)
            features = torch.load(cached_features_file)
        else:
            logger.info("Creating features from dataset file at %s", args.data_dir)
            examples = read_examples_from_file(args.data_dir, mode)
            features = convert_examples_to_features(
                examples,
                labels,
                args.max_seq_length,
                tokenizer,
                cls_token_at_end=bool(args.model_type in ["xlnet"]),  # xlnet has a cls token at the end
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                # roberta uses an extra separator b/w pairs of sentences,
                # cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
                sep_token_extra=bool(args.model_type in ["roberta"]),
                pad_on_left=bool(args.model_type in ["xlnet"]),  # pad on the left for xlnet
                pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
                pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
                pad_token_label_id=pad_token_label_id,
            )
            # if args.local_rank in [-1, 0]:
            #     logger.info("Saving features into cached file %s", cached_features_file)
            #     torch.save(features, cached_features_file)

        if args.local_rank == 0 and mode == "train":
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache
            torch.distributed.barrier()

        self.features = features
        # Convert to Tensors and build dataset
        self.all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        self.all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
        self.all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
        self.all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
        self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, index):
        return (
            self.all_input_ids[index],
            self.all_input_mask[index],
            self.all_segment_ids[index],
            self.all_label_ids[index],
            self.all_bboxes[index],
        )


class InputExample(object):
    """A single training/test example for token classification."""

    def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
        """Constructs an InputExample.

        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the sequence. This should be
                specified for train and dev examples, but not for test examples.
        """
        self.guid = guid
        self.words = words
        self.labels = labels
        self.boxes = boxes
        self.actual_bboxes = actual_bboxes
        self.file_name = file_name
        self.page_size = page_size


class InputFeatures(object):
    """A single set of features of data."""

    def __init__(
        self,
        input_ids,
        input_mask,
        segment_ids,
        label_ids,
        boxes,
        actual_bboxes,
        file_name,
        page_size,
    ):
        assert all(
            0 <= coord <= 1000 for box in boxes for coord in box
        ), "Error with input bbox ({}): a coordinate value is not between 0 and 1000".format(boxes)
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
        self.boxes = boxes
        self.actual_bboxes = actual_bboxes
        self.file_name = file_name
        self.page_size = page_size


def read_examples_from_file(data_dir, mode):
    file_path = os.path.join(data_dir, "{}.txt".format(mode))
    box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
    image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
    guid_index = 1
    examples = []
    with open(file_path, encoding="utf-8") as f, open(box_file_path, encoding="utf-8") as fb, open(
        image_file_path, encoding="utf-8"
    ) as fi:
        words = []
        boxes = []
        actual_bboxes = []
        file_name = None
        page_size = None
        labels = []
        for line, bline, iline in zip(f, fb, fi):
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                if words:
                    examples.append(
                        InputExample(
                            guid="{}-{}".format(mode, guid_index),
                            words=words,
                            labels=labels,
                            boxes=boxes,
                            actual_bboxes=actual_bboxes,
                            file_name=file_name,
                            page_size=page_size,
                        )
                    )
                    guid_index += 1
                    words = []
                    boxes = []
                    actual_bboxes = []
                    file_name = None
                    page_size = None
                    labels = []
            else:
                splits = line.split("\t")
                bsplits = bline.split("\t")
                isplits = iline.split("\t")
                assert len(splits) == 2
                assert len(bsplits) == 2
                assert len(isplits) == 4
                assert splits[0] == bsplits[0]
                words.append(splits[0])
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                    box = bsplits[-1].replace("\n", "")
                    box = [int(b) for b in box.split()]
                    boxes.append(box)
                    actual_bbox = [int(b) for b in isplits[1].split()]
                    actual_bboxes.append(actual_bbox)
                    page_size = [int(i) for i in isplits[2].split()]
                    file_name = isplits[3].strip()
                else:
                    # Examples could have no label for mode = "test"
                    labels.append("O")
        if words:
            examples.append(
                InputExample(
                    guid="{}-{}".format(mode, guid_index),
                    words=words,
                    labels=labels,
                    boxes=boxes,
                    actual_bboxes=actual_bboxes,
                    file_name=file_name,
                    page_size=page_size,
                )
            )
    return examples


def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    cls_token_box=[0, 0, 0, 0],
    sep_token_box=[1000, 1000, 1000, 1000],
    pad_token_box=[0, 0, 0, 0],
    pad_token_segment_id=0,
    pad_token_label_id=-1,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
):
    """Loads a data file into a list of `InputBatch`s.

    `cls_token_at_end` defines the location of the CLS token:
        - False (default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` defines the segment id associated with the CLS token
    (0 for BERT, 2 for XLNet).
    """

    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in enumerate(examples):
        file_name = example.file_name
        page_size = example.page_size
        width, height = page_size
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))

        tokens = []
        token_boxes = []
        actual_bboxes = []
        label_ids = []
        for word, label, box, actual_bbox in zip(example.words, example.labels, example.boxes, example.actual_bboxes):
            word_tokens = tokenizer.tokenize(word)
            tokens.extend(word_tokens)
            token_boxes.extend([box] * len(word_tokens))
            actual_bboxes.extend([actual_bbox] * len(word_tokens))
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens) > max_seq_length - special_tokens_count:
            tokens = tokens[: (max_seq_length - special_tokens_count)]
            token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
            actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
            label_ids = label_ids[: (max_seq_length - special_tokens_count)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens += [sep_token]
        token_boxes += [sep_token_box]
        actual_bboxes += [[0, 0, width, height]]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            token_boxes += [sep_token_box]
            actual_bboxes += [[0, 0, width, height]]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)

        if cls_token_at_end:
            tokens += [cls_token]
            token_boxes += [cls_token_box]
            actual_bboxes += [[0, 0, width, height]]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            token_boxes = [cls_token_box] + token_boxes
            actual_bboxes = [[0, 0, width, height]] + actual_bboxes
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
            token_boxes = ([pad_token_box] * padding_length) + token_boxes
        else:
            input_ids += [pad_token] * padding_length
            input_mask += [0 if mask_padding_with_zero else 1] * padding_length
            segment_ids += [pad_token_segment_id] * padding_length
            label_ids += [pad_token_label_id] * padding_length
            token_boxes += [pad_token_box] * padding_length

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        assert len(token_boxes) == max_seq_length

        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            logger.info("boxes: %s", " ".join([str(x) for x in token_boxes]))
            logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes]))

        features.append(
            InputFeatures(
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_ids=label_ids,
                boxes=token_boxes,
                actual_bboxes=actual_bboxes,
                file_name=file_name,
                page_size=page_size,
            )
        )
    return features

from transformers import LayoutLMTokenizer

# from .unilm.layoutlm.data.funsd import FunsdDataset, InputFeatures
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler

batch_size = 16
args = {
    "local_rank": -1,
    "overwrite_cache": True,
    "data_dir": "/home/sourab/temp/data/",
    "model_name_or_path": "microsoft/layoutlm-base-uncased",
    "max_seq_length": 512,
    "model_type": "layoutlm",
}


# class to turn the keys of a dict into attributes (thanks Stackoverflow)
class AttrDict(dict):
    def __init__(self, *args, **kwargs):
        super(AttrDict, self).__init__(*args, **kwargs)
        self.__dict__ = self


args = AttrDict(args)

tokenizer = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")

# the LayoutLM authors already defined a specific FunsdDataset, so we are going to use this here
train_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="train")
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=batch_size)

eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode="test")
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=batch_size)

len(train_dataloader)

len(eval_dataloader)

batch = next(iter(train_dataloader))
input_ids = batch[0][0]
tokenizer.decode(input_ids)

from peft import get_peft_config, PeftModel, get_peft_model, LoraConfig, TaskType

peft_config = LoraConfig(
    task_type=TaskType.TOKEN_CLS, inference_mode=False, r=16, lora_alpha=16, lora_dropout=0.1, bias="all"
)
peft_config

from transformers import LayoutLMForTokenClassification
import torch
from transformers import set_seed

seed = 100
set_seed(seed)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=num_labels)
model = get_peft_model(model, peft_config)
model.to(device)

print(model.model.layoutlm.encoder.layer[0].attention.self.query.weight)
print(model.model.layoutlm.encoder.layer[0].attention.self.query.lora_A.weight)
print(model.model.classifier.weight)

from transformers import get_linear_schedule_with_warmup
from tqdm import tqdm

num_train_epochs = 100

optimizer = torch.optim.AdamW(model.parameters(), lr=3e-3)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer=optimizer,
    num_warmup_steps=0.06 * (len(train_dataloader) * num_train_epochs),
    num_training_steps=(len(train_dataloader) * num_train_epochs),
)

global_step = 0
t_total = len(train_dataloader) * num_train_epochs  # total number of training steps

# put the model in training mode
model.train()
for epoch in range(num_train_epochs):
    for batch in tqdm(train_dataloader, desc="Training"):
        input_ids = batch[0].to(device)
        bbox = batch[4].to(device)
        attention_mask = batch[1].to(device)
        token_type_ids = batch[2].to(device)
        labels = batch[3].to(device)

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        loss = outputs.loss

        # print loss every 10 steps
        if global_step % 10 == 0:
            print(f"Loss after {global_step} steps: {loss.item()}")

        # backward pass to get the gradients
        loss.backward()

        # print("Gradients on classification head:")
        # print(model.classifier.weight.grad[6,:].sum())

        # update
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()
        global_step += 1

import numpy as np
from seqeval.metrics import (
    classification_report,
    f1_score,
    precision_score,
    recall_score,
)

eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None

# put model in evaluation mode
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
    with torch.no_grad():
        input_ids = batch[0].to(device)
        bbox = batch[4].to(device)
        attention_mask = batch[1].to(device)
        token_type_ids = batch[2].to(device)
        labels = batch[3].to(device)

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )
        # get the loss and logits
        tmp_eval_loss = outputs.loss
        logits = outputs.logits

        eval_loss += tmp_eval_loss.item()
        nb_eval_steps += 1

        # compute the predictions
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = labels.detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0)

# compute average evaluation loss
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)

out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]

for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:
            out_label_list[i].append(label_map[out_label_ids[i][j]])
            preds_list[i].append(label_map[preds[i][j]])

results = {
    "loss": eval_loss,
    "precision": precision_score(out_label_list, preds_list),
    "recall": recall_score(out_label_list, preds_list),
    "f1": f1_score(out_label_list, preds_list),
}
print(results)

model.print_trainable_parameters()

model.save_pretrained("peft_layoutlm")
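# --- Added note (not part of the original notebook) ---
# A minimal sketch for loading the adapter saved above back onto a fresh base
# model for inference; besides the original base checkpoint, only the small
# "peft_layoutlm" adapter folder is needed.
from transformers import LayoutLMForTokenClassification
from peft import PeftModel

base = LayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=num_labels)
inference_model = PeftModel.from_pretrained(base, "peft_layoutlm")
inference_model.eval()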
0