Dataset columns: id (int64, values 0 to ~190k), prompt (string, lengths 21 to ~13.4M characters), docstring (string, lengths 1 to ~12k characters).
161,386
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rliable import library as rly, metrics, plot_utils

for algo in full_offline_scores:
    for data in full_offline_scores[algo]:
        full_offline_scores[algo][data] = [s[0] for s in full_scores[algo][data]]
        full_online_scores[algo][data] = [s[1] for s in full_scores[algo][data]]
        regrets[algo][data] = np.mean([s[2] for s in full_scores[algo][data]])
        regrets_std[algo][data] = np.std([s[2] for s in full_scores[algo][data]])

flat = flatten(full_online_scores)

def flatten(data):
    res = {}
    for algo in data:
        flat = []
        for env in data[algo]:
            if "avg" not in env:
                env_list = np.array(data[algo][env])[:, -1]
                flat.append(env_list)
        res[algo] = np.array(flat).T
    return res
null
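The `flatten` helper in the row above turns per-environment score curves into the runs-by-tasks matrices that rliable's aggregate metrics operate on. A self-contained numpy sketch with made-up scores (the algorithm and environment names are illustrative only):

```python
import numpy as np

# data[algo][env] holds one score curve per training run; [:, -1] keeps each run's final score
data = {
    "IQL": {
        "halfcheetah-medium": [[10.0, 40.0, 47.0], [12.0, 38.0, 45.0]],
        "hopper-medium": [[20.0, 55.0, 60.0], [18.0, 52.0, 58.0]],
    }
}

flat = {}
for algo in data:
    per_env = [np.array(data[algo][env])[:, -1] for env in data[algo] if "avg" not in env]
    flat[algo] = np.array(per_env).T  # shape (n_runs, n_envs)

print(flat["IQL"].shape)  # (2, 2)
```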
161,387
import os
import pickle

import pandas as pd
import wandb
from tqdm import tqdm

def get_run_scores(run_id, is_dt=False):
    run = api.run(run_id)
    score_key = None
    all_scores = []
    max_dt = -1e10
    for k in run.history().keys():
        if "normalized" in k and "score" in k and "std" not in k:
            if is_dt:
                st = k
                if "eval/" in st:
                    st = st.replace("eval/", "")
                target = float(st.split("_")[0])
                if target > max_dt:
                    max_dt = target
                    score_key = k
            else:
                score_key = k
                break
    for _, row in run.history(keys=[score_key], samples=5000).iterrows():
        all_scores.append(row[score_key])
    return all_scores

full_scores = process_runs(dataframe)

def process_runs(df):
    algorithms = df["algorithm"].unique()
    datasets = df["dataset"].unique()
    full_scores = {algo: {ds: [] for ds in datasets} for algo in algorithms}
    for _, row in tqdm(
        df.iterrows(), desc="Runs scores downloading", position=0, leave=True
    ):
        full_scores[row["algorithm"]][row["dataset"]].append(
            get_run_scores(row["url"], row["algorithm"] == "DT")
        )
    return full_scores
null
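Note that `get_run_scores` above relies on a module-level `api` object that the snippet never creates; in the public wandb client it would typically come from `wandb.Api()`. A hedged sketch of that setup (the run path below is a placeholder, not a real run):

```python
import wandb

api = wandb.Api()
run = api.run("entity/project/run_id")  # placeholder path of the form "<entity>/<project>/<run_id>"
history = run.history(samples=5000)     # sampled history rows as a pandas DataFrame
print([k for k in history.keys() if "normalized" in k and "score" in k])
```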
161,388
import importlib
import importlib.metadata as importlib_metadata
import importlib.util  # needed so importlib.util.find_spec below is always resolvable
from functools import lru_cache

import packaging.version

def is_optimum_available() -> bool:
    return importlib.util.find_spec("optimum") is not None
null
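The check above uses `importlib.util.find_spec`, which looks a package up on the import path without actually importing it. A minimal generic version of the same pattern (the package names are just examples):

```python
import importlib.util

def is_package_available(name: str) -> bool:
    # find_spec returns None when the package cannot be located,
    # so the check avoids the package's own import-time side effects
    return importlib.util.find_spec(name) is not None

print(is_package_available("json"))             # True: standard library
print(is_package_available("no_such_package"))  # False: not installed
```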
161,389
from __future__ import annotations import os from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import remove_hook_from_submodules from torch import nn from transformers.utils import PushToHubMixin from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES from .config import PeftConfig from .peft_model import PeftModel from .tuners import ( AdaLoraModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MixedModel, OFTModel, ) from .utils import PeftType, _set_adapter, _set_trainable The provided code snippet includes necessary dependencies for implementing the `_prepare_model_for_gradient_checkpointing` function. Write a Python function `def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None` to solve the following problem: r""" Prepares the model for gradient checkpointing if necessary Here is the function: def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None: r""" Prepares the model for gradient checkpointing if necessary """ # Note: same as PeftModel._prepare_model_for_gradient_checkpointing if not getattr(model, "is_gradient_checkpointing", True): return model if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
Prepares the model for gradient checkpointing if necessary
161,390
from __future__ import annotations import os from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import remove_hook_from_submodules from torch import nn from transformers.utils import PushToHubMixin from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES from .config import PeftConfig from .peft_model import PeftModel from .tuners import ( AdaLoraModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MixedModel, OFTModel, ) from .utils import PeftType, _set_adapter, _set_trainable class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) def _check_config_compatible(peft_config: PeftConfig) -> None: if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES: raise ValueError( f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. " f"Compatible types are: {COMPATIBLE_TUNER_TYPES}" )
null
161,391
import math
from typing import Any, Optional, Set, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from peft.tuners.lycoris_utils import LycorisLayer

def make_weight_cp(t, wa, wb):
    rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb)  # [c, d, k1, k2]
    return rebuild2
null
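The einsum in `make_weight_cp` contracts a small core tensor with two projection matrices to rebuild a full convolution weight. A standalone shape check (the tensor sizes are arbitrary, chosen only for illustration):

```python
import torch

# core tensor t: [rank_in, rank_out, k1, k2]; wa: [rank_in, c]; wb: [rank_out, d]
t = torch.randn(4, 4, 3, 3)
wa = torch.randn(4, 16)
wb = torch.randn(4, 8)

rebuild = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb)
print(rebuild.shape)  # torch.Size([16, 8, 3, 3]) -> [c, d, k1, k2]
```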
161,392
import math
from typing import Any, Optional, Set, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F

from peft.tuners.lycoris_utils import LycorisLayer

def make_kron(w1, w2, scale=1.0):
    if len(w2.shape) == 4:
        w1 = w1.unsqueeze(2).unsqueeze(2)
    w2 = w2.contiguous()
    rebuild = torch.kron(w1, w2)
    return rebuild * scale
null
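`make_kron` composes two small factors into a large weight via a Kronecker product (the LoKr construction); when the second factor is a 4D convolution weight it first adds two singleton dims to `w1` so the product covers the kernel dims. A quick shape check with arbitrary sizes:

```python
import torch

w1 = torch.randn(4, 2)         # [out1, in1]
w2 = torch.randn(8, 16, 3, 3)  # [out2, in2, k1, k2]

w1_4d = w1.unsqueeze(2).unsqueeze(2)          # [4, 2, 1, 1]
rebuild = torch.kron(w1_4d, w2.contiguous())
print(rebuild.shape)  # torch.Size([32, 32, 3, 3]) == [out1*out2, in1*in2, k1, k2]
```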
161,393
import math
from typing import Any, Set, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F

from peft.tuners.lycoris_utils import LycorisLayer

class HadaWeight(torch.autograd.Function):
    @staticmethod
    def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)):
        ctx.save_for_backward(w1a, w1b, w2a, w2b, scale)
        diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale
        return diff_weight

    @staticmethod
    def backward(ctx, grad_out):
        (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors
        grad_out = grad_out * scale
        # recompute each factor's product instead of caching it in forward
        temp = grad_out * (w2a @ w2b)
        grad_w1a = temp @ w1b.T
        grad_w1b = w1a.T @ temp

        temp = grad_out * (w1a @ w1b)
        grad_w2a = temp @ w2b.T
        grad_w2b = w2a.T @ temp

        del temp
        return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None

def make_weight(w1a, w1b, w2a, w2b, scale):
    return HadaWeight.apply(w1a, w1b, w2a, w2b, scale)
null
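`HadaWeight` builds the LoHa update as an element-wise (Hadamard) product of two low-rank products, with a hand-written backward that recomputes the intermediates rather than storing them. A small standalone check of the forward formula with arbitrary shapes:

```python
import torch

out_dim, in_dim, rank = 6, 5, 2
w1a, w1b = torch.randn(out_dim, rank, requires_grad=True), torch.randn(rank, in_dim, requires_grad=True)
w2a, w2b = torch.randn(out_dim, rank, requires_grad=True), torch.randn(rank, in_dim, requires_grad=True)
scale = torch.tensor(0.5)

diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale  # same formula as HadaWeight.forward
print(diff_weight.shape)  # torch.Size([6, 5]): shaped like the weight being adapted

diff_weight.sum().backward()  # gradients flow to all four low-rank factors
print(w1a.grad.shape, w2b.grad.shape)  # torch.Size([6, 2]) torch.Size([2, 5])
```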
161,394
import math from typing import Any, Set, Tuple import torch import torch.nn as nn import torch.nn.functional as F from peft.tuners.lycoris_utils import LycorisLayer class HadaWeightCP(torch.autograd.Function): def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)): ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale) rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a) rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a) return rebuild1 * rebuild2 * scale def backward(ctx, grad_out): (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors grad_out = grad_out * scale temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b) rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a) grad_w = rebuild * grad_out del rebuild grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T) del grad_w, temp grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp) grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T) del grad_temp temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b) rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a) grad_w = rebuild * grad_out del rebuild grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T) del grad_w, temp grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp) grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T) del grad_temp return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale): return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
null
161,395
from abc import ABC, abstractmethod import torch from torch import nn from torch.distributions.relaxed_bernoulli import RelaxedBernoulli from .config import PolyConfig class PolyRouter(Router): # It's a simplified implementation of # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L138 def __init__(self, poly_config: PolyConfig): super().__init__() self.poly_type = poly_config.poly_type self.n_tasks = poly_config.n_tasks self.n_skills = poly_config.n_skills self.n_splits = poly_config.n_splits self.module_logits = nn.Parameter(torch.empty((self.n_tasks, self.n_splits * self.n_skills))) def reset(self): torch.nn.init.uniform_(self.module_logits, -1e-3, 1e-3) def forward(self, task_ids: torch.Tensor, input_ids: torch.Tensor): if task_ids is None: raise ValueError("task_ids should not be None.") if task_ids.max().item() >= self.n_tasks: raise ValueError(f"Only {self.n_tasks} tasks available. Found task id = {task_ids.max().item()}") # move task id to input's device task_ids = task_ids.to(self.module_logits.device) module_logits = self.module_logits[task_ids] module_logits = module_logits.view(-1, self.n_splits, self.n_skills) if self.training: module_logits = RelaxedBernoulli(temperature=1.0, logits=module_logits).rsample() else: module_logits = torch.sigmoid(module_logits) module_weights = module_logits / (module_logits.sum(dim=-1, keepdim=True) + EPS) return module_weights class PolyConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`PolyModel`]. - [Polytropon (Poly)](https://arxiv.org/abs/2202.13914) - [Multi-Head Routing (MHR)](https://arxiv.org/abs/2211.03831) Args: r (`int`): Attention dimension of each Lora in Poly. target_modules (`Union[List[str],str]`): The names of the modules to apply Poly to. modules_to_save (`List[str]`): List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. init_weights (bool): Whether to perform initialization of Poly weights. poly_type (`Literal["poly"]`): The variant of the Poly module to use. Currently, only "poly" is supported. n_tasks (`int`): The number of tasks in a multitasking scenario. n_skills (`int`): The number of skills (LoRA) in each Poly layer. n_splits (`int`): The number of splits within each LoRA of a Poly layer. A value greater than 1 indicates the use of Multi-Head Routing (MHR). """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with Poly." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from Poly layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the Poly layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) poly_type: Literal["poly"] = field( default="poly", metadata={"help": 'Type of Poly modules to be used. 
Currently only "poly" is supported.'}, ) n_tasks: int = field( default=1, metadata={"help": "Number of tasks in multitasking scenario."}, ) n_skills: int = field( default=4, metadata={"help": "Number of skills (LoRA) in each Poly layer."}, ) n_splits: int = field( default=1, metadata={"help": "Number of splits within each LoRA of a Poly layer."}, ) def __post_init__(self): self.peft_type = PeftType.POLY self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) def get_router(poly_config: PolyConfig) -> nn.Module: if poly_config.poly_type == "poly": return PolyRouter(poly_config) else: raise ValueError( f"Unsupported poly_type: {poly_config.poly_type}. " "Currently, only the following types are supported: " "`poly`." )
null
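The routing math in `PolyRouter.forward` above is compact enough to trace by hand: per-task logits are gathered, reshaped into `(batch, n_splits, n_skills)`, sampled with a Relaxed Bernoulli during training (or squashed with a sigmoid at eval time), and normalized across skills. A standalone sketch; note that `EPS` is referenced but never defined in the snippet, so the value below is an assumption:

```python
import torch
from torch.distributions.relaxed_bernoulli import RelaxedBernoulli

EPS = 1e-12  # assumed; the original snippet uses EPS without defining it
n_tasks, n_splits, n_skills = 3, 2, 4

module_logits = torch.empty(n_tasks, n_splits * n_skills).uniform_(-1e-3, 1e-3)
task_ids = torch.tensor([0, 2, 1])  # one task id per example in the batch

logits = module_logits[task_ids].view(-1, n_splits, n_skills)
training = True
probs = RelaxedBernoulli(temperature=1.0, logits=logits).rsample() if training else torch.sigmoid(logits)
module_weights = probs / (probs.sum(dim=-1, keepdim=True) + EPS)

print(module_weights.shape)        # torch.Size([3, 2, 4])
print(module_weights.sum(dim=-1))  # each row of skills sums to ~1
```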
161,396
from __future__ import annotations import copy import logging import re import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules The provided code snippet includes necessary dependencies for implementing the `onload_layer` function. Write a Python function `def onload_layer(layer)` to solve the following problem: r""" A utility for modifying a module containing one or more tuners and a base layer, any of which are offloaded to the CPU or disk. Moves a module's sub-modules to the execution device before some action is performed, after that the base layer state dictionary is re-assigned (if that layer was offloaded to the disk) and finally the parameters are offloaded. If the module has no offloaded sub-modules, this function does nothing. Args: layer ('torch.nn.Module'): layer with tuners to be merged Here is the function: def onload_layer(layer): r""" A utility for modifying a module containing one or more tuners and a base layer, any of which are offloaded to the CPU or disk. Moves a module's sub-modules to the execution device before some action is performed, after that the base layer state dictionary is re-assigned (if that layer was offloaded to the disk) and finally the parameters are offloaded. If the module has no offloaded sub-modules, this function does nothing. Args: layer ('torch.nn.Module'): layer with tuners to be merged """ offloaded_modules = [] for name, module in layer.named_modules(): if name in ["", "base_layer"]: continue if hasattr(module, "_hf_hook") and isinstance(module._hf_hook, AlignDevicesHook) and module._hf_hook.offload: module._hf_hook.pre_forward(module) offloaded_modules.append(module) base_layer_offload = False if hasattr(layer, "base_layer") and ( hasattr(layer.base_layer, "_hf_hook") and isinstance(layer.base_layer._hf_hook, AlignDevicesHook) and layer.base_layer._hf_hook.offload ): if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values(): # retrieve the name of the original disk-offload directory offload_folder = layer.base_layer._hf_hook.weights_map.dataset.save_folder layer.base_layer._hf_hook.pre_forward(layer.base_layer) base_layer_offload = True yield for module in offloaded_modules: module._hf_hook.post_forward(module, torch.tensor([])) if base_layer_offload: # re-make weights map (must be on cpu to send params to the disk via memmap if disk offload) layer.base_layer._hf_hook.weights_map = { name: param.to("cpu") for name, param in named_module_tensors(layer.base_layer) } # offload weights map to disk if original device is the disk if torch.device("meta") in layer.base_layer._hf_hook.original_devices.values(): # rewrite directory with merged weights offload_state_dict(offload_folder, layer.base_layer._hf_hook.weights_map) layer.base_layer._hf_hook.post_forward(layer.base_layer, torch.tensor([]))
A utility for modifying a module containing one or more tuners and a base layer, any of which are offloaded to the CPU or disk. Moves a module's sub-modules to the execution device before some action is performed, after that the base layer state dictionary is re-assigned (if that layer was offloaded to the disk) and finally the parameters are offloaded. If the module has no offloaded sub-modules, this function does nothing. Args: layer ('torch.nn.Module'): layer with tuners to be merged
161,397
from __future__ import annotations import copy import logging import re import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules The provided code snippet includes necessary dependencies for implementing the `check_target_module_exists` function. Write a Python function `def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None` to solve the following problem: A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. Args: config (`LoraConfig` | `LycorisConfig`): A config to match target modules from key (`str`): A key to search any matches in config Returns: `bool` | `re.Match[str]` | `None`: True of match object if key matches any target modules from config, False or None if no match found Here is the function: def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None: """A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. Args: config (`LoraConfig` | `LycorisConfig`): A config to match target modules from key (`str`): A key to search any matches in config Returns: `bool` | `re.Match[str]` | `None`: True of match object if key matches any target modules from config, False or None if no match found """ if isinstance(config.target_modules, str): target_module_found = re.fullmatch(config.target_modules, key) elif key in config.target_modules: # this module is specified directly in target_modules target_module_found = True else: target_module_found = any(key.endswith(f".{target_key}") for target_key in config.target_modules) layer_indexes = getattr(config, "layers_to_transform", None) layers_pattern = getattr(config, "layers_pattern", None) is_using_layer_indexes = layer_indexes is not None and ( len(layer_indexes) != 0 if isinstance(layer_indexes, list) else True ) if is_using_layer_indexes and target_module_found: layer_index = None # TODO: It's still unclear how empty layers_pattern (None, [], or "") should behave # For now, empty layers_pattern means any layer pattern is ok if layers_pattern is None or len(layers_pattern) == 0: layer_index = re.match(r".*\.[^.]*\.(\d+)\.", key) else: layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern for pattern in layers_pattern: layer_index = re.match(rf".*\.{pattern}\.(\d+)\.", key) if layer_index is not None: break if layer_index is None: target_module_found = False else: layer_index = int(layer_index.group(1)) if isinstance(layer_indexes, int): target_module_found = layer_index == layer_indexes else: target_module_found = layer_index in layer_indexes return target_module_found
A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. Args: config (`LoraConfig` | `LycorisConfig`): A config to match target modules from key (`str`): A key to search any matches in config Returns: `bool` | `re.Match[str]` | `None`: True or match object if key matches any target modules from config, False or None if no match found
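The matching rules in `check_target_module_exists` boil down to: a string config is treated as a regex and must fullmatch the key, while a list matches either exactly or as a `.`-separated suffix. A stripped-down sketch with a stand-in config class (it omits the `layers_to_transform` / `layers_pattern` handling and is not the PEFT API itself):

```python
import re
from dataclasses import dataclass

@dataclass
class DummyConfig:  # minimal stand-in for a real adapter config, illustration only
    target_modules: object = None

def matches(config, key):
    if isinstance(config.target_modules, str):
        return bool(re.fullmatch(config.target_modules, key))
    if key in config.target_modules:
        return True
    return any(key.endswith(f".{t}") for t in config.target_modules)

cfg_list = DummyConfig(target_modules=["q_proj", "v_proj"])
cfg_re = DummyConfig(target_modules=r".*\.layers\.\d+\.self_attn\.(q|v)_proj")

print(matches(cfg_list, "model.layers.0.self_attn.q_proj"))  # True: suffix match
print(matches(cfg_re, "model.layers.7.self_attn.v_proj"))    # True: regex fullmatch
print(matches(cfg_list, "model.layers.0.mlp.up_proj"))       # False
```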
161,398
from __future__ import annotations import copy import logging import re import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules class BaseTuner(nn.Module, ABC): r""" A base tuner model that provides the common methods and attributes for all tuners that are injectable into a torch.nn.Module For adding a new Tuner class, one needs to overwrite the following methods: - **_prepare_adapter_config**: A private method to eventually prepare the adapter config, for example in case the field `target_modules` is missing. - **_create_and_replace**: A private method to create and replace the target module with the adapter module. - **_check_target_module_exists**: A private helper method to check if the passed module's key name matches any of the target modules in the adapter_config. The easiest is to check what is done in the `peft.tuners.lora.LoraModel` class. Attributes: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. forward (`Callable`): The forward method of the model. peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`): The adapter configuration object, it should be a dictionary of `str` to `PeftConfig` objects. One can also pass a PeftConfig object and a new adapter will be created with the default name `adapter` or create a new dictionary with a key `adapter_name` and a value of that peft config. config (`dict[str, Any]`): The model configuration object, it should be a dictionary of `str` to `Any` objects. targeted_module_names (`list[str]`): The list of module names that were actually adapted. Can be useful to inspect if you want to quickly double-check that the `config.target_modules` where specified correctly. """ def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None: super().__init__() self.model = model self.targeted_module_names: list[str] = [] # For advanced developers, if you want to attach multiple adapters to your # model, just add a `peft_config` dict attribute to your model. if not hasattr(self, "peft_config"): self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config else: logger.info( "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters" " in the model. Make sure to know what you are doing!" ) if isinstance(peft_config, PeftConfig): self.peft_config[adapter_name] = peft_config else: # user is adding a dict of PeftConfigs self.peft_config.update(peft_config) self.active_adapter = adapter_name self.inject_adapter(self.model, adapter_name) # Copy the peft_config in the injected model. self.model.peft_config = self.peft_config def active_adapters(self) -> list[str]: if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def forward(self, *args: Any, **kwargs: Any): return self.model.forward(*args, **kwargs) def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: r""" A private method to eventually prepare the adapter config. 
For transformers based models, if `peft_config.target_modules` is None, we can automatically infer the target modules from the `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to automatically infer it for all tuner models. Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. Args: peft_config (`PeftConfig`): The adapter config. model_config (`dict`): The transformers model config, that config should contain the `model_type` key. """ ... def _prepare_model(self, peft_config: PeftConfig, model: nn.Module): r""" A private method to modify the model structure before adapter is applied. See `peft.tuner.lora.LoraModel._prepare_model` for an example. Args: peft_config (`PeftConfig`): The prepared adapter config. model (`nn.Module`): The model that is going to be adapted. """ pass def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: r""" A helper private method to check if the passed module's key name matches any of the target modules in the `peft_config.target_modules` list. If it does, return `True`, else return `False`. Args: peft_config (`PeftConfig`): The adapter config. key (`str`): The module's key name. """ ... def _create_and_replace( self, peft_config: PeftConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, current_key: str, ) -> None: r""" Inplace replacement of the target module with the adapter layer. This method needs to be overridden by all the tuner classes. Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. Args: peft_config (`PeftConfig`): The adapter config. adapter_name (`str`): The adapter name. target (`nn.Module`): The target module. target_name (`str`): The target module's name. parent (`nn.Module`): The parent module. current_key (`str`): The key of the current target being adapted. """ ... def _mark_only_adapters_as_trainable(self, model: nn.Module): r""" A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to be overridden for all tuner classes to match the correct key names. Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. """ ... def _check_new_adapter_config(self, config: PeftConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ pass def _check_merge_allowed(self): """Helper method to check whether the adapter can be merged. Raise a ValueError if it is not possible to merge the adapter with the given configuration. """ pass def inject_adapter(self, model: nn.Module, adapter_name: str): r""" Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. Args: model (`nn.Module`): The model to be tuned. adapter_name (`str`): The adapter name. """ peft_config = self.peft_config[adapter_name] # Note: If possible, all checks should be performed *at the start of this method*. # This way, we can raise early if something goes wrong, without leaving the model # in a bad (half-initialized) state. 
self._check_new_adapter_config(peft_config) _check_for_modules_to_save = getattr(peft_config, "modules_to_save", None) is not None _has_modules_to_save = False model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config = self._prepare_adapter_config(peft_config, model_config) self._prepare_model(peft_config, model) is_target_modules_in_base_model = False key_list = [key for key, _ in model.named_modules()] # update peft_config.target_modules if required peft_config = _maybe_include_all_linear_layers(peft_config, model) for key in key_list: # Check for modules_to_save in case if _check_for_modules_to_save and any( key.endswith(f"{module_to_save}") for module_to_save in peft_config.modules_to_save ): # Optionally set the modules to save parent, target, target_name = _get_submodules(model, key) if not isinstance(target, ModulesToSaveWrapper): new_module = ModulesToSaveWrapper(target, adapter_name) setattr(parent, target_name, new_module) else: target.update(adapter_name) _has_modules_to_save = True continue if not self._check_target_module_exists(peft_config, key): continue self.targeted_module_names.append(key) is_target_modules_in_base_model = True parent, target, target_name = _get_submodules(model, key) self._create_and_replace(peft_config, adapter_name, target, target_name, parent, current_key=key) if not is_target_modules_in_base_model: raise ValueError( f"Target modules {peft_config.target_modules} not found in the base model. " f"Please check the target modules and try again." ) self._mark_only_adapters_as_trainable(model) if self.peft_config[adapter_name].inference_mode: for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False if _has_modules_to_save: if not hasattr(model, "modules_to_save"): model.modules_to_save = set(peft_config.modules_to_save) else: model.modules_to_save.update(set(peft_config.modules_to_save)) def merge_adapter(self, adapter_names: Optional[list[str]] = None) -> None: """ This method merges the adapter layers into the base model. Merging adapters can lead to a speed up of the forward pass. A copy of the adapter weights is still kept in memory, which is required to unmerge the adapters. In order to merge the adapter weights without keeping them in memory, please call `merge_and_unload`. Args: safe_merge (`bool`, *optional*): If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If `None`, all active adapters will be merged. Defaults to `None`. """ self._check_merge_allowed() for module in self.model.modules(): if isinstance(module, BaseTunerLayer): with onload_layer(module): module.merge(adapter_names=adapter_names) def unmerge_adapter(self): """ This method unmerges all merged adapter layers from the base model. 
""" for module in self.model.modules(): if isinstance(module, BaseTunerLayer): with onload_layer(module): module.unmerge() def _unloading_checks(self, adapter_names: Optional[list[str]]): adapters_to_consider = adapter_names or self.active_adapters is_modules_to_save_available = any( self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider ) if is_modules_to_save_available and len(adapters_to_consider) > 1: raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.") The provided code snippet includes necessary dependencies for implementing the `inspect_matched_modules` function. Write a Python function `def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict` to solve the following problem: A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. Here is the function: def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict: """ A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. """ config = tuner.peft_config[adapter_name] key_list = [key for key, _ in tuner.model.named_modules()] module_dict = {"matched": [], "unmatched": []} for key in key_list: if tuner._check_target_module_exists(config, key): module_dict["matched"].append(key) else: module_dict["unmatched"].append(key) return module_dict
A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter.
161,399
from __future__ import annotations import copy import logging import re import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) The provided code snippet includes necessary dependencies for implementing the `_maybe_include_all_linear_layers` function. Write a Python function `def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig` to solve the following problem: Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py Here is the function: def _maybe_include_all_linear_layers(peft_config: PeftConfig, model: nn.Module) -> PeftConfig: """ Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py """ # if `target_modules` is a string, convert to lower case and check if it matches "all-linear" if not ( isinstance(peft_config.target_modules, str) and peft_config.target_modules.lower() == INCLUDE_LINEAR_LAYERS_SHORTHAND ): return peft_config if not isinstance(model, PreTrainedModel): raise ValueError( f"Only instances of PreTrainedModel support `target_modules={INCLUDE_LINEAR_LAYERS_SHORTHAND!r}`" ) linear_classes = (torch.nn.Linear, Conv1D) linear_module_names = set() for name, module in model.named_modules(): # match with all linear classes. if isinstance(module, linear_classes): names = name.rsplit(".", 1)[-1] # get the base name linear_module_names.add(names) # ignore the last classification head for text generation models output_emb = model.get_output_embeddings() if output_emb is not None: last_module_name = [name for name, module in model.named_modules() if module is output_emb][0] linear_module_names -= {last_module_name} peft_config.target_modules = linear_module_names return peft_config
Helper function to update `target_modules` to all linear/Conv1D layers if provided as 'all-linear'. Adapted from the QLoRA repository: https://github.com/artidoro/qlora/blob/main/qlora.py
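The core of the 'all-linear' shorthand is just walking `named_modules()`, keeping the base name of every `nn.Linear` (or `Conv1D`), and dropping the output head. A toy-model sketch of that collection step (the module and attribute names are made up; the real helper additionally requires a `PreTrainedModel`):

```python
import torch.nn as nn

class Block(nn.Module):
    def __init__(self):
        super().__init__()
        self.q_proj = nn.Linear(8, 8)
        self.v_proj = nn.Linear(8, 8)
        self.norm = nn.LayerNorm(8)

class ToyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([Block(), Block()])
        self.lm_head = nn.Linear(8, 100)

model = ToyModel()
linear_names = set()
for name, module in model.named_modules():
    if isinstance(module, nn.Linear):
        linear_names.add(name.rsplit(".", 1)[-1])  # keep only the base name

linear_names -= {"lm_head"}  # mimic excluding the output embedding / classification head
print(sorted(linear_names))  # ['q_proj', 'v_proj']
```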
161,400
from __future__ import annotations import copy import logging import re import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). 
Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) The provided code snippet includes necessary dependencies for implementing the `check_adapters_to_merge` function. Write a Python function `def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]] = None) -> list[str]` to solve the following problem: Helper function to check which adapters should be merged. Only return those adapters that are not already merged. Give a warning if some or all of the adapters are already merged. Here is the function: def check_adapters_to_merge(module: BaseTunerLayer, adapter_names: Optional[list[str]] = None) -> list[str]: """ Helper function to check which adapters should be merged. Only return those adapters that are not already merged. Give a warning if some or all of the adapters are already merged. 
""" if adapter_names is None: adapter_names = module.active_adapters if module.merged: merged_adapters = set(module.merged_adapters) adapter_names = [name for name in adapter_names if name not in merged_adapters] if adapter_names: warnings.warn( f"Already following adapters were merged {','.join(module.merged_adapters)}. " f"You are now additionally merging {','.join(adapter_names)}." ) else: warnings.warn("All adapters are already merged, nothing to do.") return adapter_names
Helper function to check which adapters should be merged. Only return those adapters that are not already merged. Give a warning if some or all of the adapters are already merged.
161,401
from __future__ import annotations import copy import logging import re import warnings from abc import ABC, abstractmethod from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import AlignDevicesHook from accelerate.utils import named_module_tensors, offload_state_dict from torch import nn from transformers import PreTrainedModel from transformers.pytorch_utils import Conv1D from peft.utils import INCLUDE_LINEAR_LAYERS_SHORTHAND from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules def clone_module(module: nn.Module, share_weights=False): """Clone a module in a pytorch model. Clones a module of a model, optionally sharing all the parameters between the original and the clone. Simplifies reusing a module when manipulating the architecture of a model. """ clone = copy.deepcopy(module) def _share_weights(src: nn.Module, dst: nn.Module): for name, param in src.named_parameters(recurse=False): dst.register_parameter(name, param) if share_weights: for name, submodule in module.named_modules(): _share_weights(submodule, clone.get_submodule(name)) return clone The provided code snippet includes necessary dependencies for implementing the `replicate_layers` function. Write a Python function `def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]])` to solve the following problem: Replicate layers in a transfomer model with weight sharing. This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module list according to the layer map. For example the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3, 4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`. Here is the function: def replicate_layers(model: nn.Module, layer_map: list[tuple[int, int]]): """Replicate layers in a transfomer model with weight sharing. This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module list according to the layer map. For example the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3, 4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`. """ while hasattr(model, "model"): model = model.model # Some variants of the bert model nest the main model under the bert attribute. if hasattr(model, "bert"): model = model.bert model_type = None layers: nn.ModuleList = None if hasattr(model, "layers"): model_type = "llama" layers = model.layers elif hasattr(model, "encoder") and hasattr(model.encoder, "layer"): model_type = "bert" layers = model.encoder.layer elif hasattr(model, "h"): model_type = "falcon" layers = model.h if not model_type or not isinstance(layers, nn.ModuleList): raise ValueError( "Could not locate the layers attribute in the model. " "Expected Llama, Bert or Falcon compatible architectures." ) new_layers = [] for start, end in layer_map: for i in range(start, end): current_idx = len(new_layers) new_layers.append(clone_module(layers[i], share_weights=True)) # This is a hack needed to work around the layer_idx introduced in HF transformers. 
for submodule in new_layers[-1].modules(): if hasattr(submodule, "layer_idx"): submodule.layer_idx = current_idx layers = nn.ModuleList(new_layers) if model_type == "llama": model.layers = layers elif model_type == "bert": model.encoder.layer = layers elif model_type == "falcon": model.h = layers else: raise ValueError("Unexpected model type, need to handle post-processing of layers.") if hasattr(model.config, "num_hidden_layers"): # Common to Llama, Bert, Falcon. model.config.num_hidden_layers = len(new_layers)
Replicate layers in a transformer model with weight sharing. This function looks for a module list attribute at model[(.model)*].layers and replicates the layers in the module list according to the layer map. For example the map `[[0, 4], [2, 5]]` will take the set of layers `[0, 1, 2, 3, 4]` and replace them with a module list containing `[0, 1, 2, 3, 2, 3, 4]`.
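`clone_module(..., share_weights=True)` in the row above deep-copies a layer and then re-registers the original parameters on the copy, so replicated layers add depth without adding new weights. A minimal sketch of that trick on a single linear layer (the helper name here is mine, not the library's):

```python
import copy
import torch.nn as nn

def clone_with_shared_weights(module: nn.Module) -> nn.Module:
    clone = copy.deepcopy(module)
    for name, submodule in module.named_modules():
        dst = clone.get_submodule(name)  # "" returns the clone itself
        for pname, param in submodule.named_parameters(recurse=False):
            dst.register_parameter(pname, param)  # point the clone at the original tensors
    return clone

layer = nn.Linear(4, 4)
shared = clone_with_shared_weights(layer)
print(shared.weight is layer.weight)  # True: one set of parameters, two module objects
```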
161,402
import importlib.metadata as importlib_metadata from typing import Any, Optional import packaging.version import torch from peft.import_utils import is_auto_awq_available from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer if is_auto_awq_available(): from awq.modules.linear import WQLinear_GEMM class AwqLoraLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) def forward(self, x: torch.Tensor): result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def is_auto_awq_available(): return importlib.util.find_spec("awq") is not None class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. 
Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def dispatch_awq( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_auto_awq_available() and isinstance(target_base_layer, WQLinear_GEMM): # Raise the error only at the dispatch level AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0") version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq")) if AUTOAWQ_MINIMUM_VERSION > version_autoawq: raise ImportError( f"Found an incompatible version of auto-awq. Found version {version_autoawq}, " f"but only versions above {AUTOAWQ_MINIMUM_VERSION} are supported for PEFT." ) new_module = AwqLoraLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.qweight return new_module
null
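`dispatch_awq` above gates on a minimum autoawq version by comparing `packaging.version` objects built from the installed distribution metadata. The same comparison in isolation, printing instead of raising and handling the package being absent:

```python
import importlib.metadata as importlib_metadata
import packaging.version

AUTOAWQ_MINIMUM_VERSION = packaging.version.parse("0.2.0")
try:
    version_autoawq = packaging.version.parse(importlib_metadata.version("autoawq"))
    print(version_autoawq >= AUTOAWQ_MINIMUM_VERSION)
except importlib_metadata.PackageNotFoundError:
    print("autoawq is not installed")
```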
161,403
from typing import Any, Optional import torch from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import get_auto_gptq_quant_linear class QuantLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer) if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def forward(self, x: torch.Tensor): # note: logic differs from default Linear because merging is not supported result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result += output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102 # def reset_lora_parameters(self, adapter_name): # if adapter_name in self.lora_A.keys(): # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight) # torch.nn.init.zeros_(self.lora_B[adapter_name].weight) class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. 
Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def dispatch_gptq( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target gptq_quantization_config = kwargs.get("gptq_quantization_config", None) AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear): new_module = QuantLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.qweight return new_module
null
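QuantLinear.forward in the row above never merges; it adds lora_B(lora_A(dropout(x))) * scaling on top of the quantized layer's output. A minimal sketch of that update with plain nn.Linear layers standing in for the GPTQ base layer (zero-initializing lora_B follows the usual LoRA convention, so the adapter starts as a no-op):

import torch
import torch.nn as nn

torch.manual_seed(0)
in_features, out_features, r, lora_alpha = 16, 8, 4, 8
scaling = lora_alpha / r  # same default scaling rule as the layers above

base = nn.Linear(in_features, out_features)    # stand-in for the quantized base layer
lora_A = nn.Linear(in_features, r, bias=False)
lora_B = nn.Linear(r, out_features, bias=False)
nn.init.zeros_(lora_B.weight)                  # adapter contribution starts at zero

x = torch.randn(2, in_features)
result = base(x) + lora_B(lora_A(x)) * scaling
assert torch.allclose(result, base(x))         # holds only because lora_B is still zero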
161,404
import warnings from typing import List, Optional import bitsandbytes as bnb import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight from peft.utils.other import transpose from .layer import LoraLayer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self.fan_in_fan_out = False self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Merge lora module to 8-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8 # dequantization directly output = dequantize_bnb_weight(weight, state=state) if not self.use_dora[active_adapter]: w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data else: # handle dora # since output already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm w_data = dora_factor.view(-1, 1) * (output + lora_data) if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Unmerge lora module to 8-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB output = dequantize_bnb_weight(weight, state=state) if not self.use_dora[active_adapter]: w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm w_data = output.data / dora_factor.view(-1, 1) - lora_data self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() def get_delta_weight(self, adapter): return ( transpose( self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False, ) * self.scaling[adapter] ) def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = lora_A.weight.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) if not self.use_dora[active_adapter]: output = lora_B(lora_A(dropout(x))) * scaling else: output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) if requires_conversion: output = output.to(expected_dtype) result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. 
Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def dispatch_bnb_8bit(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target loaded_in_8bit = kwargs.get("loaded_in_8bit", False) if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "memory_efficient_backward": target.state.memory_efficient_backward, "threshold": target.state.threshold, "index": target.index, } ) new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs) return new_module
null
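merge in the 8-bit row above dequantizes the int8 weight, adds the delta from get_delta_weight, and re-quantizes with bnb.nn.Int8Params. The equivalence it relies on — folding scaling * B @ A into the base weight reproduces the unmerged forward pass — can be checked with ordinary float tensors, no bitsandbytes required:

import torch
import torch.nn as nn

torch.manual_seed(0)
base = nn.Linear(16, 8, bias=False)
lora_A = nn.Linear(16, 4, bias=False)
lora_B = nn.Linear(4, 8, bias=False)
scaling = 2.0

x = torch.randn(3, 16)
unmerged = base(x) + lora_B(lora_A(x)) * scaling

# Fold the low-rank delta into the base weight, as merge() does after dequantization.
delta_w = lora_B.weight @ lora_A.weight * scaling
merged = nn.Linear(16, 8, bias=False)
merged.weight.data = base.weight.data + delta_w

assert torch.allclose(unmerged, merged(x), atol=1e-6)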
161,405
import warnings from typing import List, Optional import bitsandbytes as bnb import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight from peft.utils.other import transpose from .layer import LoraLayer if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self.fan_in_fan_out = False self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Merge lora module to 4-bit linear may get different generations due to rounding errors." ) # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930 weight = self.get_base_layer().weight kwargs = weight.__dict__ lora_data = self.get_delta_weight(active_adapter) output = dequantize_bnb_weight(weight, state=weight.quant_state) if not self.use_dora[active_adapter]: w_data = output + lora_data else: # handle dora # since output already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(output, lora_data, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm w_data = dora_factor.view(-1, 1) * (output + lora_data) if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) if "bnb_quantized" in kwargs: kwargs["bnb_quantized"] = False self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( weight.device ) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Unmerge lora module to 4-bit linear may get different generations due to rounding errors." 
) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight kwargs = weight.__dict__ output = dequantize_bnb_weight(weight, state=weight.quant_state) if not self.use_dora[active_adapter]: w_data = output - lora_data else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm w_data = output.data / dora_factor.view(-1, 1) - lora_data if "bnb_quantized" in kwargs: kwargs["bnb_quantized"] = False self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( weight.device ) def get_delta_weight(self, adapter): return ( transpose( self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False, ) * self.scaling[adapter] ) def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # As per Tim Dettmers, for 4bit, we need to defensively clone here. # The reason is that in some cases, an error can occur that backprop # does not work on a manipulated view. This issue may be solved with # newer PyTorch versions but this would need extensive testing to be # sure. result = result.clone() for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) if not self.use_dora[active_adapter]: output = lora_B(lora_A(dropout(x))) * scaling else: output = self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) if requires_conversion: output = output.to(expected_dtype) result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. 
Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def dispatch_bnb_4bit(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target loaded_in_4bit = kwargs.get("loaded_in_4bit", False) if loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target_base_layer.compute_dtype, "compress_statistics": target_base_layer.weight.compress_statistics, "quant_type": target_base_layer.weight.quant_type, } ) new_module = Linear4bit(target, adapter_name, **fourbit_kwargs) return new_module
null
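The DoRA branches in the 4-bit row scale the merged weight by dora_factor = lora_magnitude_vector / weight_norm and divide the cached factor back out on unmerge. A standalone check of that round-trip, assuming the per-output-row L2 norm implied by the dora_factor.view(-1, 1) broadcast:

import torch

torch.manual_seed(0)
out_features, in_features, r = 8, 16, 4
weight = torch.randn(out_features, in_features)
delta_w = torch.randn(out_features, r) @ torch.randn(r, in_features) * 0.1
magnitude = torch.ones(out_features)                 # stand-in for lora_magnitude_vector

weight_norm = (weight + delta_w).norm(p=2, dim=1)    # assumed row-wise norm
dora_factor = magnitude / weight_norm

merged = dora_factor.view(-1, 1) * (weight + delta_w)     # merge
recovered = merged / dora_factor.view(-1, 1) - delta_w    # unmerge
assert torch.allclose(recovered, weight, atol=1e-6)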
161,406
from typing import Any, Optional import torch from peft.import_utils import is_aqlm_available from peft.tuners.lora.layer import LoraLayer from peft.tuners.tuners_utils import BaseTunerLayer if is_aqlm_available(): from aqlm import QuantizedLinear class AqlmLoraLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, use_rslora: bool = False, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora) def forward(self, x: torch.Tensor): # note: logic differs from default Linear because merging is not supported result = self.base_layer(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result += output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102 # def reset_lora_parameters(self, adapter_name): # if adapter_name in self.lora_A.keys(): # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight) # torch.nn.init.zeros_(self.lora_B[adapter_name].weight) def is_aqlm_available(): return importlib.util.find_spec("aqlm") is not None class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. 
# https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. 
Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def dispatch_aqlm( target: torch.nn.Module, adapter_name: str, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_aqlm_available() and isinstance(target_base_layer, QuantizedLinear): new_module = AqlmLoraLinear(target, adapter_name, **kwargs) target.qweight = target_base_layer.codes return new_module
null
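AqlmLoraLinear.forward guards the adapter path with requires_conversion: when autocast is off, the input is cast to the adapter's dtype for the low-rank matmuls and the output is cast back before being added to the base result. A small sketch of that guard, with float64 standing in for whatever dtype the quantized layer happens to return:

import torch
import torch.nn as nn

torch.manual_seed(0)
base = nn.Linear(16, 8).double()         # float64 stands in for the base layer's output dtype
lora_A = nn.Linear(16, 4, bias=False)    # adapters kept in float32
lora_B = nn.Linear(4, 8, bias=False)
scaling = 2.0

x = torch.randn(2, 16, dtype=torch.float64)
result = base(x)

requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
    expected_dtype = result.dtype
    x = x.to(lora_A.weight.dtype)        # run the adapter matmuls in their own dtype
output = lora_B(lora_A(x)) * scaling
if requires_conversion:
    output = output.to(expected_dtype)   # cast back before adding to the base output
result = result + output
assert result.dtype == torch.float64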
161,407
import importlib import warnings from typing import Any, Optional import torch import torch.nn as nn import torch.nn.init as init from peft.tuners.tuners_utils import BaseTunerLayer from .layer import LoraLayer class LoraParallelLinear(nn.Module, LoraLayer): """ When the target layer parallel_linear is RowParallelLinear, in order to keep the input and output shapes consistent, we need to split the lora matrix A into rows, and the lora_B at this time should be a complete linear layer; In the same way, when the target layer is ColumnParallelLinear, we perform column segmentation on lora_B, while lora_A is still a complete linear layer. """ def __init__( self, base_layer, adapter_name: str, backend, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, init_lora_weights: bool = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer=base_layer) if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") self.backend = backend self.is_parallel_a = isinstance(base_layer, backend.RowParallelLinear) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name megatron_config = kwargs["megatron_config"] parallel_linear_kwargs = {"megatron_config": megatron_config} init_method = init.xavier_normal_ if hasattr(megatron_config, "init_method"): init_method = megatron_config.init_method input_is_parallel = True gather_output = False if isinstance(base_layer, self.backend.RowParallelLinear): input_is_parallel = base_layer.input_is_parallel else: gather_output = base_layer.gather_output self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, init_method=init_method, input_is_parallel=input_is_parallel, gather_output=gather_output, **parallel_linear_kwargs, ) self.is_target_conv_1d_layer = False def is_paralle_a(self): # TODO: remove it in PEFT 0.10.0 # See https://github.com/huggingface/peft/pull/1439 for more details warnings.warn( "`is_paralle_a` is going to be deprecated in a future release. 
Please use `is_parallel_a`", FutureWarning ) return self.is_parallel_a def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora=False, init_method=init.xavier_normal_, input_is_parallel=True, gather_output=False, **parallel_linear_kwargs, ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer megatron_config = parallel_linear_kwargs["megatron_config"] # lora needs to be forced to upgrade to 32-bit precision, otherwise it will overflow megatron_config.params_dtype = torch.float32 if self.is_parallel_a: lora_a = self.backend.RowParallelLinear( input_size=self.in_features, output_size=r, bias=False, input_is_parallel=input_is_parallel, skip_bias_add=True, init_method=init_method, config=megatron_config, ) lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32) else: lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32) lora_b = self.backend.ColumnParallelLinear( input_size=r, output_size=self.out_features, bias=False, gather_output=gather_output, init_method=init_method, config=megatron_config, ) self.lora_A[adapter_name] = lora_a self.lora_B[adapter_name] = lora_b if use_rslora: self.scaling[adapter_name] = lora_alpha / (r**0.5) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any): previous_dtype = x.dtype # If weight is used for matrix multiplication here, the final aggregation operation of the original # parallel_linear layer will be missing, so we need to directly call its forward function to obtain the # output of the original parallel_linear layer. if self.disable_adapters: if self.merged: self.unmerge() result, bias = self.base_layer(x, *args, **kwargs) elif self.merged: result, bias = self.base_layer(x, *args, **kwargs) else: result, bias = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) lora_result = lora_A(dropout(x)) if isinstance(lora_result, tuple): lora_result = lora_result[0] lora_result = lora_B(lora_result) if isinstance(lora_result, tuple): lora_result = lora_result[0] lora_result = lora_result * scaling result = result + lora_result result = result.to(previous_dtype) return result, bias class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. 
""" active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. """ if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. 
layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def dispatch_megatron( target: torch.nn.Module, adapter_name: str, lora_config, **kwargs: Any, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if lora_config.megatron_config: megatron_core = importlib.import_module(lora_config.megatron_core) else: megatron_core = None if megatron_core and isinstance( target_base_layer, (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear), ): megatron_kwargs = kwargs.copy() megatron_config = lora_config.megatron_config if isinstance(megatron_config, dict): transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig megatron_config = transformer_config_class(**lora_config.megatron_config) megatron_kwargs["megatron_config"] = megatron_config if megatron_kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` " "or `RowParallelLinear`. " "Setting fan_in_fan_out to False." ) megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False new_module = LoraParallelLinear( base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs ) return new_module
null
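update_layer in the Megatron row chooses between two scaling rules: lora_alpha / r by default and lora_alpha / (r**0.5) when use_rslora is set. The rank-stabilized variant keeps the adapter's effective scale from shrinking as r grows, which the comparison below makes visible:

import math

lora_alpha = 32
for r in (4, 16, 64):
    scaling_default = lora_alpha / r             # classic LoRA scaling, decays linearly in r
    scaling_rslora = lora_alpha / math.sqrt(r)   # rank-stabilized scaling (use_rslora=True)
    print(f"r={r:>2}  default={scaling_default:>5.2f}  rslora={scaling_rslora:>5.2f}")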
161,408
import math import warnings from typing import Any, List, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer, check_adapters_to_merge from peft.utils.integrations import dequantize_bnb_weight, gather_params_ctx from peft.utils.other import transpose from .config import LoraConfig class Linear(nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) self.is_target_conv_1d_layer = is_target_conv_1d_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: orig_weights = orig_weights + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm orig_weights = dora_factor.view(-1, 1) * (orig_weights + delta_weight) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: base_layer.weight.data = base_layer.weight.data + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm new_weight = dora_factor.view(-1, 1) * (base_layer.weight.data + delta_weight) base_layer.weight.data = new_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: weight.data -= delta_weight else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm weight_orig = weight.data / dora_factor.view(-1, 1) - delta_weight weight.data = weight_orig def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) if not self.use_dora[active_adapter]: result = result + lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class Embedding(nn.Module, LoraLayer): # LoRA implemented in a Embedding layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) if use_dora: raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False") self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters weight_A = torch.randn((r, self.in_features)) weight_B = torch.randn((self.out_features, r)) self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A) self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B) if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) base_layer = self.get_base_layer() weight = getattr(base_layer, "weight", None) if weight is not None: # the layer is already completely initialized, this is an update self.to(base_layer.weight.device, dtype=weight.dtype) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_embedding_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights = orig_weights + self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data = base_layer.weight.data + self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_embedding_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_embedding_B[adapter].device dtype = self.lora_embedding_A[adapter].dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_embedding_A[adapter] weight_B = self.lora_embedding_B[adapter] if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_embedding_A[adapter] = weight_A.to(dtype) self.lora_embedding_B[adapter] = weight_B.to(dtype) return output_tensor def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: base_layer = self.get_base_layer() return F.embedding( input, weight, padding_idx=base_layer.padding_idx, max_norm=base_layer.max_norm, norm_type=base_layer.norm_type, scale_grad_by_freq=base_layer.scale_grad_by_freq, sparse=base_layer.sparse, ) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # TODO: no dtype conversion here, unlike in Linear, is that correct? if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_embedding_A: continue embedding_A = self.lora_embedding_A[active_adapter].T embedding_B = self.lora_embedding_B[active_adapter].T scaling = self.scaling[active_adapter] after_A = self._embed(x, embedding_A) result = result + (after_A @ embedding_B) * scaling result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class Conv2d(nn.Module, LoraLayer): # Lora implemented in a conv2d layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, use_rslora: bool = False, use_dora: bool = False, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer( adapter_name, r, lora_alpha=lora_alpha, lora_dropout=lora_dropout, init_lora_weights=init_lora_weights, use_rslora=use_rslora, use_dora=use_dora, ) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, use_rslora, use_dora): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters base_layer = self.get_base_layer() kernel_size = base_layer.kernel_size stride = base_layer.stride padding = base_layer.padding self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False) self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False) if use_rslora: self.scaling[adapter_name] = lora_alpha / math.sqrt(r) else: self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) weight = getattr(base_layer, "weight", None) if weight is not None: # the layer is already completely initialized, this is an update self.to(base_layer.weight.device, dtype=weight.dtype) if use_dora: self.dora_init(adapter_name) self.use_dora[adapter_name] = True else: self.use_dora[adapter_name] = False self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights inside the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ adapter_names = check_adapters_to_merge(self, adapter_names) if not adapter_names: # no adapter to merge return for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: orig_weights = orig_weights + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(orig_weights, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. 
We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm orig_weights = dora_factor.view(-1, 1, 1, 1) * (orig_weights + delta_weight) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: base_layer.weight.data = base_layer.weight.data + delta_weight else: # handle dora # since delta_weight already includes scaling, set it to 1 here weight_norm = self._get_weight_norm(base_layer.weight, delta_weight, scaling=1).detach() # We need to cache weight_norm because it has to be based on the original weights. We # cannot calculate it on the fly based on the merged weights when unmerging because its a # different value self._cache_store(f"{active_adapter}-weight_norm", weight_norm) dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm new_weight = dora_factor.view(-1, 1, 1, 1) * (base_layer.weight.data + delta_weight) base_layer.weight.data = new_weight self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): weight = self.get_base_layer().weight delta_weight = self.get_delta_weight(active_adapter) if not self.use_dora[active_adapter]: weight.data -= delta_weight else: weight_norm = self._cache_pop(f"{active_adapter}-weight_norm") dora_factor = self.lora_magnitude_vector[active_adapter] / weight_norm weight_orig = weight.data / dora_factor.view(-1, 1, 1, 1) - delta_weight weight.data = weight_orig def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_A[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. 
cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117 if self.get_base_layer().weight.size()[2:4] == (1, 1): # conv2d 1x1 output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze( 3 ) * self.scaling[adapter] else: # conv2d 3x3 output_tensor = ( F.conv2d( weight_A.permute(1, 0, 2, 3), weight_B, ).permute(1, 0, 2, 3) * self.scaling[adapter] ) if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def _get_weight_norm(self, weight, lora_weight, scaling) -> torch.Tensor: # calculate L2 norm of weight matrix, channel-wise weight = weight + scaling * lora_weight # the following is needed to have compatibility with the 4D weight tensors of Conv2D weight_norm = weight.norm(p=2, dim=(1, 2, 3), keepdim=True).transpose(1, 0) return weight_norm def _apply_dora(self, x, lora_A, lora_B, scaling, active_adapter): """ For DoRA, calculate the extra output from LoRA with DoRA applied. This should be added on top of the base layer output. """ base_layer = self.get_base_layer() weight = base_layer.weight lora_weight = torch.mm(lora_B.weight.flatten(start_dim=1), lora_A.weight.flatten(start_dim=1)) lora_weight = lora_weight.reshape(weight.shape) magnitude = self.lora_magnitude_vector[active_adapter] weight_norm = self._get_weight_norm(weight, lora_weight, scaling) # see section 4.3 of DoRA (https://arxiv.org/abs/2402.09353) # "[...] we suggest treating ||V +∆V ||_c in # Eq. (5) as a constant, thereby detaching it from the gradient # graph. This means that while ||V + ∆V ||_c dynamically # reflects the updates of ∆V , it won’t receive any gradient # during backpropagation" weight_norm = weight_norm.detach() mag_norm_scale = magnitude / weight_norm result_dora = (mag_norm_scale - 1) * ( F.conv2d( x, weight, bias=None, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups, ) ) + mag_norm_scale * lora_B(lora_A(x)) * scaling return result_dora def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) torch_result_dtype = result.dtype for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) if not self.use_dora[active_adapter]: result = result + lora_B(lora_A(dropout(x))) * scaling else: x = dropout(x) result = result + self._apply_dora(x, lora_A, lora_B, scaling, active_adapter) result = result.to(torch_result_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. 
Args: is_pluggable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def bias(self) -> torch.Tensor: base_layer = self.get_base_layer() return base_layer.bias def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError def merged(self) -> bool: return bool(self.merged_adapters) def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. 
""" if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension (the "rank"). target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. lora_alpha (`int`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for LoRA. 
Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. use_rslora (`bool`): When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a> which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better. Otherwise, it will use the original default value of `lora_alpha/r`. modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. init_lora_weights (`bool` | `Literal["gaussian", "loftq"]`): How to initialize the weights of the adapter layers. Passing True (default) results in the default initialization from the reference implementation from Microsoft. Passing 'gaussian' results in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization to False leads to completely random initialization and is discouraged. Pass `'loftq'` to use LoftQ initialization. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None`. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. megatron_config (`Optional[dict]`): The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron. The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron. megatron_core (`Optional[str]`): The core module from Megatron to use, defaults to `"megatron.core"`. loftq_config (`Optional[LoftQConfig]`): The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a quantized model in this case, as LoftQ will quantize the model itself. use_dora (`bool`): Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, see https://arxiv.org/abs/2402.09353. layer_replication(`List[Tuple[int, int]]`): Build a new stack of layers by stacking the original model layers according to the ranges specified. This allows expanding (or shrinking) the model without duplicating the base model weights. 
The new layers will all have separate LoRA adapters attached to them. """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with LoRA." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." ), }, ) lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: Literal["none", "all", "lora_only"] = field( default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"} ) use_rslora: bool = field( default=False, metadata={ "help": ( "When set to True, uses Rank-Stabilized LoRA doi.org/10.48550/arXiv.2312.03732" " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it" " was proven to work better. Otherwise, it will use the original default" " value of `lora_alpha/r`." ) }, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: bool | Literal["gaussian", "loftq"] = field( default=True, metadata={ "help": ( "How to initialize the weights of the LoRA layers. Passing True (default) results in the default " "initialization from the reference implementation from Microsoft. Passing 'gaussian' results " "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization " "to False leads to completely random initialization and is discouraged." "Pass `'loftq'` to use LoftQ initialization" ), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " "This only works when target_modules is a list of str." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." "This only works when target_modules is a list of str." }, ) rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. 
" "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) megatron_config: Optional[dict] = field( default=None, metadata={ "help": ( "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer." "You can get it like this, `core_transformer_config_from_args(get_args())`, " "these two functions being from Megatron." "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and " "RowParallelLinear layers of megatron." "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` " "functions, because TransformerConfig may not necessarily be serialized." "But when using megatron, we can use `get_peft_model_state_dict` function and " "megatron's framework, they can also save and load models and configurations." ) }, ) megatron_core: Optional[str] = field( default="megatron.core", metadata={ "help": ( "The core module from Megatron, it is used to create LoRA's parallel linear layer. " "It only needs to be passed in when you need to use your own modified megatron core module. " "Otherwise, it will use the default value `megatron.core`. " ) }, ) # dict type is used when loading config.json loftq_config: Union[LoftQConfig, dict] = field( default_factory=dict, metadata={ "help": ( "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone " "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case." ) }, ) use_dora: bool = field( default=False, metadata={ "help": ( "Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the " "weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the " "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, " "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger" "overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, " "see https://arxiv.org/abs/2402.09353." ) }, ) # Enables replicating layers in a model to expand it to a larger model. layer_replication: Optional[list[tuple[int, int]]] = field( default=None, metadata={ "help": ( "This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. " "The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with " "a module list in the model which it modifies to expand the number of modules. " "Base weights are shared so the memory usage is close to the original model. The intended use is these base weights " "remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialed via " "the adapter layers fit during fine tuning." "The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n" " Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n" " layer_replication: `[[0, 4], [2, 5]]`\n" " Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\n" "This format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential " "ranges of a model and stack them while reusing layers at either end of each sequence." 
) }, ) def __post_init__(self): self.peft_type = PeftType.LORA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") if self.use_dora and self.megatron_config: raise ValueError("DoRA does not support megatron_core, please set `use_dora=False`.") # handle init_lora_weights and loftq_config if self.init_lora_weights == "loftq": import importlib if not importlib.util.find_spec("scipy"): raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") if self.loftq_config is None: raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.") # convert loftq_config to dict if self.loftq_config and not isinstance(self.loftq_config, dict): self.loftq_config = vars(self.loftq_config) def dispatch_default( target: torch.nn.Module, adapter_name: str, lora_config: LoraConfig, **kwargs, ) -> Optional[torch.nn.Module]: new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Embedding): embedding_kwargs = kwargs.copy() embedding_kwargs.pop("fan_in_fan_out", None) embedding_kwargs.update(lora_config.loftq_config) new_module = Embedding(target, adapter_name, **embedding_kwargs) elif isinstance(target_base_layer, torch.nn.Conv2d): kwargs.update(lora_config.loftq_config) new_module = Conv2d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, **kwargs) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs) return new_module
null
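The merge/unmerge logic in the LoRA layers above reduces to adding delta_weight = (lora_B @ lora_A) * scaling to the base weight and, for DoRA, rescaling each output row by magnitude / ||W + delta_W||. The sketch below reproduces that arithmetic with plain torch tensors to check that the unmerge step exactly reverses the merge step; the shapes and the magnitude vector are made up for illustration and this is not the PEFT layer API itself.

import torch

out_features, in_features, r, lora_alpha = 8, 16, 4, 8
scaling = lora_alpha / r  # default scaling, lora_alpha / r

base_weight = torch.randn(out_features, in_features)
lora_A = torch.randn(r, in_features) * 0.1
lora_B = torch.randn(out_features, r) * 0.1
magnitude = torch.rand(out_features) + 0.5  # illustrative stand-in for lora_magnitude_vector

# delta weight as computed in get_delta_weight (fan_in_fan_out=False case)
delta_weight = (lora_B @ lora_A) * scaling

# DoRA merge: cache the per-output-channel L2 norm of W + delta, then rescale each row
weight_norm = (base_weight + delta_weight).norm(p=2, dim=1)
dora_factor = magnitude / weight_norm
merged = dora_factor.view(-1, 1) * (base_weight + delta_weight)

# DoRA unmerge: divide the cached factor back out and subtract the delta
restored = merged / dora_factor.view(-1, 1) - delta_weight
assert torch.allclose(restored, base_weight, atol=1e-5)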
161,409
import inspect import torch import torch.nn as nn def llama_rotate_half(x: torch.Tensor) -> torch.Tensor: """ Rotate half the hidden dims of the input. This function was duplicated verbatim from: https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126 This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other functions were also adapted from the transformers implementation but were modified. """ x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def llama_apply_rotary_pos_emb(q, cos, sin, position_ids): """ Apply rotary position embedding to query states in the Llama model. This function was adapted from: https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133 It was modified to remove unnecessary processing of key states. The method is compatible with transformers <= 4.34.2 and also with the latest version (>=4.35). """ # In previous transformers version cos/sin cached had a shape of 4D if len(cos.shape) == 4: gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) # In the new version, it is 2D so we fall back to the new implementation # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226 else: cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) q_embed = (q * cos) + (llama_rotate_half(q) * sin) return q_embed The provided code snippet includes necessary dependencies for implementing the `llama_compute_query_states` function. Write a Python function `def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor` to solve the following problem: Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the original LlamaModel in the transformers library does not return them. See the related discussion in the PR: https://github.com/huggingface/peft/pull/268 Here is the function: def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor: """ Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the original LlamaModel in the transformers library does not return them. 
See the related discussion in the PR: https://github.com/huggingface/peft/pull/268 """ hidden_states = kwargs.get("hidden_states") position_ids = kwargs.get("position_ids") past_key_value = kwargs.get("past_key_value") bsz, q_len, _ = hidden_states.size() query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2) factor = model.k_proj.in_features // model.k_proj.out_features value_states = ( model.v_proj(hidden_states).view(bsz, q_len, (model.num_heads // factor), model.head_dim).transpose(1, 2) ) seq_len = q_len if past_key_value is not None: if isinstance(past_key_value, tuple): # for transformers <= 4.35 seq_len += past_key_value[0].shape[-2] else: # since transformers 4.36, this is a DynamicCache instance seq_len += past_key_value.get_seq_length(model.layer_idx) # For transformers > 4.37.2 `position_ids` became a required arguments in the rotary embedding's forward pass. if "position_ids" not in inspect.signature(model.rotary_emb.forward).parameters: # TODO we assume that position_ids is not None here, not sure if that is safe but the old code also did that cos, sin = model.rotary_emb(value_states, seq_len=seq_len) return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids) past_seen_tokens = 0 if position_ids is None: # Compute position_ids, since they are required for transformers > 4.37.2 if past_key_value is None: new_cache_positions = torch.arange(q_len, q_len + q_len, device=value_states.device) else: past_seen_tokens = past_key_value.get_usable_length(q_len, model.layer_idx) new_cache_positions = torch.arange(past_seen_tokens, past_seen_tokens + q_len, device=value_states.device) position_ids = new_cache_positions.unsqueeze(0) rotary_emb_kwargs = {"position_ids": position_ids} # The `seq_len` argument has been officially removed in transformers >= 4.39.0 if "seq_len" in inspect.signature(model.rotary_emb.forward).parameters: rotary_emb_kwargs["seq_len"] = q_len + past_seen_tokens cos, sin = model.rotary_emb(value_states, **rotary_emb_kwargs) # For batched inference unsqueeze it on the correct dim # since: https://github.com/huggingface/transformers/pull/29109 if len(cos.shape) == 3: cos = cos.unsqueeze(1) sin = sin.unsqueeze(1) return (query_states * cos) + (llama_rotate_half(query_states) * sin)
Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the original LlamaModel in the transformers library does not return them. See the related discussion in the PR: https://github.com/huggingface/peft/pull/268
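The rotary-embedding helpers in this snippet all reduce to q_embed = q * cos + rotate_half(q) * sin. Below is a small self-contained check of that formula; the cos/sin tables are built from the usual 10000^(-2i/d) frequencies rather than taken from a model's rotary_emb module, and the tensor sizes are arbitrary.

import torch

def rotate_half(x):
    # same operation as llama_rotate_half above
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)

bsz, num_heads, seq_len, head_dim = 1, 2, 5, 8
q = torch.randn(bsz, num_heads, seq_len, head_dim)

# toy rotary table: theta_i = 10000^(-2i/d), angle = position * theta_i
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
positions = torch.arange(seq_len).float()
angles = torch.outer(positions, inv_freq)   # (seq_len, head_dim // 2)
emb = torch.cat((angles, angles), dim=-1)   # (seq_len, head_dim)
cos = emb.cos()[None, None, :, :]           # broadcast over batch and heads
sin = emb.sin()[None, None, :, :]

# the same formula used at the end of llama_apply_rotary_pos_emb
q_embed = (q * cos) + (rotate_half(q) * sin)
print(q_embed.shape)  # torch.Size([1, 2, 5, 8])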
161,410
import inspect
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `is_adaption_prompt_trainable` function.
Write a Python function `def is_adaption_prompt_trainable(params: str) -> bool` to solve the following problem:
Return True if module is trainable under adaption prompt fine-tuning.
Here is the function:
def is_adaption_prompt_trainable(params: str) -> bool:
    """Return True if module is trainable under adaption prompt fine-tuning."""
    return params.split(".")[-1].startswith("adaption_")
Return True if module is trainable under adaption prompt fine-tuning.
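As a usage sketch (the parameter names below are invented for illustration), the helper is applied to fully qualified parameter names, so only parameters whose final component starts with "adaption_" are left trainable:

def is_adaption_prompt_trainable(params: str) -> bool:
    # same one-liner as in the snippet above
    return params.split(".")[-1].startswith("adaption_")

example_names = [
    "model.layers.0.self_attn.adaption_prompt",
    "model.layers.0.self_attn.adaption_gate",
    "model.layers.0.self_attn.q_proj.weight",
]
for name in example_names:
    print(name, "->", is_adaption_prompt_trainable(name))
# the first two would keep requires_grad=True, the q_proj weight would stay frozen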
161,411
from collections import namedtuple
from dataclasses import dataclass, field
from peft.config import PeftConfig
from peft.utils import PeftType
from .utils import llama_compute_query_states
@dataclass
class AdaptionPromptConfig(PeftConfig):
    """Stores the configuration of an [`AdaptionPromptModel`]."""

    target_modules: str = field(
        default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."}
    )
    adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"})
    adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"})

    def __post_init__(self):
        self.peft_type = PeftType.ADAPTION_PROMPT

    @property
    def is_adaption_prompt(self) -> bool:
        """Return True if this is an adaption prompt config."""
        return True
# Per-model-type metadata describing how to hook adaption prompts into the attention module
ModelTypeConfig = namedtuple(
    "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"]
)
TRANSFORMERS_MODEL_CONFIG = {
    "llama": ModelTypeConfig(
        compute_query_states=llama_compute_query_states,
        target_modules="self_attn",
        k_proj_layer="k_proj",
        v_proj_layer="v_proj",
        o_proj_layer="o_proj",
    ),
    "mistral": ModelTypeConfig(  # same as llama
        compute_query_states=llama_compute_query_states,
        target_modules="self_attn",
        k_proj_layer="k_proj",
        v_proj_layer="v_proj",
        o_proj_layer="o_proj",
    ),
}
The provided code snippet includes necessary dependencies for implementing the `prepare_config` function.
Write a Python function `def prepare_config( peft_config: AdaptionPromptConfig, model, ) -> AdaptionPromptConfig` to solve the following problem:
Prepare the config based on the llama model type.
Here is the function:
def prepare_config(
    peft_config: AdaptionPromptConfig,
    model,
) -> AdaptionPromptConfig:
    """Prepare the config based on the llama model type."""
    if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG:
        raise ValueError(f"Unsupported model type for adaption prompt: '{model.config.model_type}'.")

    model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type]

    if peft_config.target_modules is None:
        peft_config.target_modules = model_config.target_modules

    return peft_config
Prepare the config based on the llama model type.
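A minimal sketch of how prepare_config fills in target_modules from the per-model-type mapping; the SimpleNamespace stand-ins below only imitate the attributes the function reads and are not real transformers or PEFT objects.

from types import SimpleNamespace

# stand-ins exposing just model.config.model_type and the config fields used here
model = SimpleNamespace(config=SimpleNamespace(model_type="llama"))
peft_config = SimpleNamespace(target_modules=None, adapter_len=10, adapter_layers=30)

# same lookup/fill-in logic as prepare_config above, with a trimmed-down mapping
default_target_modules = {"llama": "self_attn", "mistral": "self_attn"}
if model.config.model_type not in default_target_modules:
    raise ValueError(f"Unsupported model type for adaption prompt: '{model.config.model_type}'.")
if peft_config.target_modules is None:
    peft_config.target_modules = default_target_modules[model.config.model_type]

print(peft_config.target_modules)  # self_attn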
161,412
import inspect from copy import deepcopy from functools import update_wrapper from types import MethodType from .peft_model import PeftModel def update_forward_signature(model: PeftModel) -> None: """ Args: Updates the forward signature of the PeftModel to include parents class signature model (`PeftModel`): Peft model to update the forward signature Example: ```python >>> from transformers import WhisperForConditionalGeneration >>> from peft import get_peft_model, LoraConfig, update_forward_signature >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"]) >>> peft_model = get_peft_model(model, peft_config) >>> update_forward_signature(peft_model) ``` """ # Only update signature when the current forward signature only has *args and **kwargs current_signature = inspect.signature(model.forward) if ( len(current_signature.parameters) == 2 and "args" in current_signature.parameters and "kwargs" in current_signature.parameters ): forward = deepcopy(model.forward.__func__) update_wrapper( forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__") ) model.forward = MethodType(forward, model) def update_generate_signature(model: PeftModel) -> None: """ Args: Updates the generate signature of a PeftModel with overriding generate to include parents class signature model (`PeftModel`): Peft model to update the generate signature Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... ) >>> peft_model = get_peft_model(model, peft_config) >>> update_generate_signature(peft_model) >>> help(peft_model.generate) ``` """ if not hasattr(model, "generate"): return current_signature = inspect.signature(model.generate) if ( len(current_signature.parameters) == 2 and "args" in current_signature.parameters and "kwargs" in current_signature.parameters ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters): generate = deepcopy(model.generate.__func__) update_wrapper( generate, type(model.get_base_model()).generate, assigned=("__doc__", "__name__", "__annotations__"), ) model.generate = MethodType(generate, model) class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. **Attributes**: - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. 
- **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() self.modules_to_save = None self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self._is_prompt_learning = peft_config.is_prompt_learning if self._is_prompt_learning: self._peft_config = {adapter_name: peft_config} self.base_model = model self.add_adapter(adapter_name, peft_config) else: self._peft_config = None cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) self.set_additional_trainable_modules(peft_config, adapter_name) if getattr(model, "is_gradient_checkpointing", True): model = self._prepare_model_for_gradient_checkpointing(model) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 def peft_config(self) -> dict[str, PeftConfig]: if self._is_prompt_learning: return self._peft_config return self.base_model.peft_config def active_adapters(self) -> list[str]: try: adapters = self.base_model.active_adapters except AttributeError: adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] return adapters def peft_config(self, value: dict[str, PeftConfig]): if self._is_prompt_learning: self._peft_config = value else: self.base_model.peft_config = value def save_pretrained( self, save_directory: str, safe_serialization: bool = True, selected_adapters: Optional[list[str]] = None, save_embedding_layers: Union[str, bool] = "auto", is_main_process: bool = True, **kwargs: Any, ) -> None: r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). safe_serialization (`bool`, *optional*): Whether to save the adapter files in safetensors format, defaults to `True`. selected_adapters (`List[str]`, *optional*): A list of adapters to be saved. If `None`, will default to all adapters. save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. and automatically sets the boolean flag. This only works for 🤗 transformers models. is_main_process (`bool`, *optional*): Whether the process calling this is the main process or not. Will default to `True`. Will not save the checkpoint if not on the main process, which is important for multi device setups (e.g. DDP). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if selected_adapters is None: selected_adapters = list(self.peft_config.keys()) else: if any( selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters ): raise ValueError( f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" f" {list(self.peft_config.keys())} - got {selected_adapters}." ) if is_main_process: os.makedirs(save_directory, exist_ok=True) self.create_or_update_model_card(save_directory) for adapter_name in selected_adapters: peft_config = self.peft_config[adapter_name] # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name, save_embedding_layers=save_embedding_layers, ) output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) if is_main_process and safe_serialization: # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 # Safetensors does not allow tensor aliasing. # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in output_state_dict.items(): # Sometimes in the state_dict we have non-tensor objects. # e.g. in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} for _, names in shared_ptrs.items(): # Here we just clone the shared tensors to avoid tensor aliasing which is # not supported in safetensors. for shared_tensor_name in names[1:]: output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() safe_save_file( output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}, ) elif is_main_process: torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True if peft_config.task_type is None: # deal with auto mapping base_model_class = self._get_base_model_class( is_prompt_tuning=peft_config.is_prompt_learning, ) parent_library = base_model_class.__module__ auto_mapping_dict = { "base_model_class": base_model_class.__name__, "parent_library": parent_library, } else: auto_mapping_dict = None if is_main_process: peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) peft_config.inference_mode = inference_mode def from_pretrained( cls, model: torch.nn.Module, model_id: Union[str, os.PathLike], adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ) -> PeftModel: r""" Instantiate a PEFT model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. 
Args: model ([`torch.nn.Module`]): The model to be adapted. For 🤗 Transformers models, the model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`]. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuration. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. """ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model def _setup_prompt_encoder(self, adapter_name: str): config = self.peft_config[adapter_name] if not hasattr(self, "prompt_encoder"): self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if transformer_backbone is None: transformer_backbone = self.base_model if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] # the actual unsharded shape is stored in "ds_shape" attribute # special handling is needed in case the model is initialized in 
deepspeed.zero.Init() context or HfDeepSpeedConfig # has been called before # For reference refer to issue: https://github.com/huggingface/peft/issues/996 deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) if value.shape[0] == self.base_model.config.vocab_size or ( deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size ): self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") prompt_encoder = prompt_encoder.to(self.device) self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): r""" Prepares the model for gradient checkpointing if necessary """ if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: """ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning method. """ prompt_encoder = self.prompt_encoder[adapter_name] prompt_tokens = ( self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) ) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) else: prompt_embeddings = prompt_encoder(prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor: """ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = ( self.prompt_tokens[self.active_adapter] .unsqueeze(0) .expand(batch_size, -1) .to(prompt_encoder.embedding.weight.device) ) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) if self.base_model_torch_dtype is not None: past_key_values = past_key_values.to(self.base_model_torch_dtype) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompts = prompt_encoder(prompt_tokens, task_ids) else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def get_nb_trainable_parameters(self) -> tuple[int, int]: r""" Returns the number of trainable parameters and the number of all parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self) -> None: """ Prints the number of trainable parameters in the model. Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number of trainable parameters of the backbone transformer model which can be different. 
""" trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) def _get_base_model_class(self, is_prompt_tuning=False): """ Returns the base model class. """ if not is_prompt_tuning: return self.base_model.model.__class__ return self.base_model.__class__ def disable_adapter(self): """ Context manager that disables the adapter module. Use this to run inference on the base model. Example: ```py >>> with model.disable_adapter(): ... model(inputs) ``` """ try: if self.peft_config[self.active_adapter].is_prompt_learning: # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and # letting the underlying methods deal with it, same as how LoRA does it. old_forward = self.forward self.forward = self.base_model.forward old_prepare_inputs_for_generation = self.prepare_inputs_for_generation self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation else: self.base_model.disable_adapter_layers() yield finally: if self.peft_config[self.active_adapter].is_prompt_learning: self.forward = old_forward self.prepare_inputs_for_generation = old_prepare_inputs_for_generation else: self.base_model.enable_adapter_layers() def get_base_model(self) -> torch.nn.Module: """ Returns the base model. """ return ( self.base_model if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY) else self.base_model.model ) def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None: """ Add an adapter to the model based on the passed configuration. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. """ if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." 
) try: if peft_config.is_prompt_learning: self.peft_config[adapter_name] = peft_config if hasattr(self.config, "to_dict"): dict_config = self.config.to_dict() else: dict_config = self.config peft_config = _prepare_prompt_learning_config(peft_config, dict_config) self._setup_prompt_encoder(adapter_name) elif peft_config.is_adaption_prompt: self.base_model.add_adapter(adapter_name, peft_config) else: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self.base_model.model, adapter_name) except Exception: # something went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) def _split_kwargs(cls, kwargs: dict[str, Any]): _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) hf_hub_download_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: hf_hub_download_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, other_kwargs def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): """ Load a trained adapter into the model. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. kwargs: (`optional`): Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. 
""" from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) torch_device = infer_device() if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, **hf_hub_download_kwargs, ) ].from_pretrained( model_id, **hf_hub_download_kwargs, ) if peft_config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) # load the weights into the model load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if self.peft_config[adapter_name].is_prompt_learning: remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default if not is_trainable: self.eval() return load_result def set_adapter(self, adapter_name: str) -> None: """ Sets the active adapter. Only one adapter can be active at a time. Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str`): The name of the adapter to be set as active. The adapter must be loaded first. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not self.peft_config[adapter_name].is_prompt_learning: self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) def base_model_torch_dtype(self): return getattr(self.base_model, "dtype", None) def active_peft_config(self): return self.peft_config[self.active_adapter] def create_or_update_model_card(self, output_dir: str): """ Updates or create model card to include information about peft: 1. Adds `peft` library tag 2. Adds peft version 3. Adds base model info 4. 
Adds quantization information if it was used """ filename = os.path.join(output_dir, "README.md") card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) card.data["library_name"] = "peft" model_config = getattr(self, "config", None) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() if model_config is not None and "_name_or_path" in model_config: card.data["base_model"] = model_config["_name_or_path"] lines = card.text.splitlines() quantization_config = None if hasattr(model_config, "quantization_config"): quantization_config = self.config.quantization_config.to_dict() training_config_text = "" quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" # Adds quantization information if it was used if quantization_config is not None: training_config_text += f"\n{quantization_prefix}\n" training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) training_config_text += "\n" training_procedure_heading = "## Training procedure" if quantization_prefix not in lines and bool(training_config_text): if training_procedure_heading in lines: lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) else: lines.append(f"{training_procedure_heading}\n{training_config_text}") # Adds peft version framework_block_heading = "### Framework versions" if f"- PEFT {__version__}" not in lines: if framework_block_heading in lines: lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") else: lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") card.text = "\n".join(lines) card.save(filename) The provided code snippet includes necessary dependencies for implementing the `update_signature` function. Write a Python function `def update_signature(model: PeftModel, method: str = "all") -> None` to solve the following problem: Args: Updates the signature of a PeftModel include parents class signature for forward or generate method model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update signature choose one of "forward", "generate", "all" Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... ) >>> peft_model = get_peft_model(model, peft_config) >>> update_signature(peft_model) >>> help(peft_model.generate) ``` Here is the function: def update_signature(model: PeftModel, method: str = "all") -> None: """ Args: Updates the signature of a PeftModel include parents class signature for forward or generate method model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update signature choose one of "forward", "generate", "all" Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... 
task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... ) >>> peft_model = get_peft_model(model, peft_config) >>> update_signature(peft_model) >>> help(peft_model.generate) ``` """ if method == "forward": update_forward_signature(model) elif method == "generate": update_generate_signature(model) elif method == "all": update_forward_signature(model) update_generate_signature(model) else: raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
Updates the signature of a PeftModel to include the parent class's signature for the forward or generate method.

Args:
    model (`PeftModel`): Peft model whose generate or forward signature should be updated.
    method (`str`): Which signature to update; choose one of "forward", "generate", "all".

Example:

```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> from peft import get_peft_model, LoraConfig, TaskType, update_signature

>>> model_name_or_path = "bigscience/mt0-large"
>>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
>>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
>>> peft_config = LoraConfig(
...     task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
... )
>>> peft_model = get_peft_model(model, peft_config)
>>> update_signature(peft_model)
>>> help(peft_model.generate)
```
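As a quick, hedged illustration of what this helper changes (not part of the source above; it assumes `peft_model` was built as in the docstring example and that `update_signature` lives in `peft.helpers`, which may differ across peft versions), the signature reported by `inspect` should reflect the base model's full `generate` signature after the call:

```python
import inspect

from peft.helpers import update_signature  # assumed import path; adjust for your peft version

# Before: the PEFT wrapper's generate is typically reported with a generic (**kwargs) signature.
print(inspect.signature(peft_model.generate))

# Copy the underlying transformers generate signature onto the wrapper.
update_signature(peft_model, method="generate")

# After: help()/inspect should now show the base model's documented generate parameters.
print(inspect.signature(peft_model.generate))
```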
161,413
from __future__ import annotations from typing import TYPE_CHECKING, Any import torch from .config import PeftConfig from .mixed_model import PeftMixedModel from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import ( AdaLoraConfig, AdaLoraModel, AdaptionPromptConfig, IA3Config, IA3Model, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, LoraConfig, LoraModel, MultitaskPromptTuningConfig, OFTConfig, OFTModel, PolyConfig, PolyModel, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, ) from .utils import _prepare_prompt_learning_config PEFT_TYPE_TO_CONFIG_MAPPING: dict[str, PeftConfig] = { "ADAPTION_PROMPT": AdaptionPromptConfig, "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "LOHA": LoHaConfig, "LOKR": LoKrConfig, "ADALORA": AdaLoraConfig, "IA3": IA3Config, "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig, "OFT": OFTConfig, "POLY": PolyConfig, } class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) The provided code snippet includes necessary dependencies for implementing the `get_peft_config` function. Write a Python function `def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig` to solve the following problem: Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. Here is the function: def get_peft_config(config_dict: dict[str, Any]) -> PeftConfig: """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict)
Returns a Peft config object from a dictionary.

Args:
    config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters.
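For context, a minimal usage sketch (not part of the dataset row; the config values below are illustrative assumptions): `get_peft_config` dispatches on the `peft_type` key and forwards the remaining entries to the matching config class.

```python
from peft import get_peft_config

# Illustrative dict; "LORA" selects LoraConfig from PEFT_TYPE_TO_CONFIG_MAPPING,
# and the remaining keys are passed through as keyword arguments.
config_dict = {
    "peft_type": "LORA",
    "task_type": "CAUSAL_LM",
    "r": 8,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "target_modules": ["q_proj", "v_proj"],
}

peft_config = get_peft_config(config_dict)
print(type(peft_config).__name__)  # LoraConfig
```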
161,414
from __future__ import annotations from typing import TYPE_CHECKING, Any import torch from .config import PeftConfig from .mixed_model import PeftMixedModel from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import ( AdaLoraConfig, AdaLoraModel, AdaptionPromptConfig, IA3Config, IA3Model, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, LoraConfig, LoraModel, MultitaskPromptTuningConfig, OFTConfig, OFTModel, PolyConfig, PolyModel, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, ) from .utils import _prepare_prompt_learning_config MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, PeftModel] = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, "QUESTION_ANS": PeftModelForQuestionAnswering, "FEATURE_EXTRACTION": PeftModelForFeatureExtraction, } class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) class PeftMixedModel(PushToHubMixin, torch.nn.Module): """ PeftMixedModel for loading mixing different types of adapters for inference. This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. <Tip> Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn more about using different adapter types. </Tip> Example: ```py >>> from peft import get_peft_model >>> base_model = ... # load the base model, e.g. from transformers >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval() >>> peft_model.load_adapter(path_to_adapter2, "adapter2") >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters >>> peft_model(data) # forward pass using both adapters ``` Args: model (`torch.nn.Module`): The model to be tuned. config (`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the first adapter. 
""" def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() _check_config_compatible(peft_config) _prepare_model_for_gradient_checkpointing(model) self.modules_to_save = None self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name) self.set_modules_to_save(peft_config, adapter_name) self.config = getattr(model, "config", {"model_type": "custom"}) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 def peft_config(self) -> dict[str, PeftConfig]: return self.base_model.peft_config def active_adapter(self) -> str: return self.base_model.active_adapter def active_adapters(self) -> list[str]: return self.base_model.active_adapters def get_nb_trainable_parameters(self): r""" Returns the number of trainable parameters and number of all parameters in the model. """ # note: same as PeftModel.get_nb_trainable_parameters trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number of trainable parameters of the backbone transformer model which can be different. """ # note: same as PeftModel.print_trainable_parameters trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || " f"all params: {all_param:,d} || " f"trainable%: {100 * trainable_params / all_param:.4f}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.base_model(*args, **kwargs) def generate(self, *args: Any, **kwargs: Any): """ Generate output. """ return self.base_model.generate(*args, **kwargs) def disable_adapter(self): """ Disables the adapter module. 
""" try: self.base_model.disable_adapter_layers() yield finally: self.base_model.enable_adapter_layers() def add_adapter(self, adapter_name: str, peft_config: PeftConfig): _check_config_compatible(peft_config) try: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self, adapter_name) except Exception: # something went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_modules_to_save(peft_config, adapter_name) def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None: if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None: return if self.modules_to_save is None: self.modules_to_save = set(modules_to_save) else: self.modules_to_save.update(modules_to_save) _set_trainable(self, adapter_name) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Sets the active adapter(s) for the model. Note that the order in which the adapters are applied during the forward pass may not be the same as the order in which they are passed to this function. Instead, the order during the forward pass is determined by the order in which the adapters were loaded into the model. The active adapters only determine which adapters are active during the forward pass, but not the order in which they are applied. Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): The name of the adapter(s) to be activated. """ if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.delete_adapter(adapter_name) def merge_and_unload(self, *args: Any, **kwargs: Any): r""" This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self.base_model.merge_and_unload(*args, **kwargs) def unload(self, *args: Any, **kwargs: Any): """ Gets back the base model by removing all the adapter modules without merging. This gives back the original base model. 
""" return self.base_model.unload(*args, **kwargs) def _split_kwargs(cls, kwargs: dict[str, Any]): return PeftModel._split_kwargs(kwargs) def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any): output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs) # TODO: not quite clear why this is necessary but tests fail without it self.set_adapter(self.active_adapters) return output def create_or_update_model_card(self, output_dir: str): raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).") def save_pretrained( self, save_directory: str, safe_serialization: bool = False, selected_adapters: Optional[list[str]] = None, **kwargs: Any, ): raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).") def from_pretrained( cls, model: nn.Module, model_id: str | os.PathLike, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ): r""" Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. Args: model (`nn.Module`): The model to be adapted. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for inference config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuration. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. 
""" # note: adapted from PeftModel.from_pretrained from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") # note: this is different from PeftModel.from_pretrained if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING: raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: # note: should not be possible to reach, but just in case raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel model = cls(model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. **Attributes**: - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. 
""" def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() self.modules_to_save = None self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self._is_prompt_learning = peft_config.is_prompt_learning if self._is_prompt_learning: self._peft_config = {adapter_name: peft_config} self.base_model = model self.add_adapter(adapter_name, peft_config) else: self._peft_config = None cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) self.set_additional_trainable_modules(peft_config, adapter_name) if getattr(model, "is_gradient_checkpointing", True): model = self._prepare_model_for_gradient_checkpointing(model) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 def peft_config(self) -> dict[str, PeftConfig]: if self._is_prompt_learning: return self._peft_config return self.base_model.peft_config def active_adapters(self) -> list[str]: try: adapters = self.base_model.active_adapters except AttributeError: adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] return adapters def peft_config(self, value: dict[str, PeftConfig]): if self._is_prompt_learning: self._peft_config = value else: self.base_model.peft_config = value def save_pretrained( self, save_directory: str, safe_serialization: bool = True, selected_adapters: Optional[list[str]] = None, save_embedding_layers: Union[str, bool] = "auto", is_main_process: bool = True, **kwargs: Any, ) -> None: r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). safe_serialization (`bool`, *optional*): Whether to save the adapter files in safetensors format, defaults to `True`. selected_adapters (`List[str]`, *optional*): A list of adapters to be saved. If `None`, will default to all adapters. save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. and automatically sets the boolean flag. This only works for 🤗 transformers models. is_main_process (`bool`, *optional*): Whether the process calling this is the main process or not. Will default to `True`. Will not save the checkpoint if not on the main process, which is important for multi device setups (e.g. DDP). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if selected_adapters is None: selected_adapters = list(self.peft_config.keys()) else: if any( selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters ): raise ValueError( f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" f" {list(self.peft_config.keys())} - got {selected_adapters}." ) if is_main_process: os.makedirs(save_directory, exist_ok=True) self.create_or_update_model_card(save_directory) for adapter_name in selected_adapters: peft_config = self.peft_config[adapter_name] # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name, save_embedding_layers=save_embedding_layers, ) output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) if is_main_process and safe_serialization: # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 # Safetensors does not allow tensor aliasing. # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in output_state_dict.items(): # Sometimes in the state_dict we have non-tensor objects. # e.g. in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} for _, names in shared_ptrs.items(): # Here we just clone the shared tensors to avoid tensor aliasing which is # not supported in safetensors. for shared_tensor_name in names[1:]: output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() safe_save_file( output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}, ) elif is_main_process: torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True if peft_config.task_type is None: # deal with auto mapping base_model_class = self._get_base_model_class( is_prompt_tuning=peft_config.is_prompt_learning, ) parent_library = base_model_class.__module__ auto_mapping_dict = { "base_model_class": base_model_class.__name__, "parent_library": parent_library, } else: auto_mapping_dict = None if is_main_process: peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) peft_config.inference_mode = inference_mode def from_pretrained( cls, model: torch.nn.Module, model_id: Union[str, os.PathLike], adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ) -> PeftModel: r""" Instantiate a PEFT model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. 
Args: model ([`torch.nn.Module`]): The model to be adapted. For 🤗 Transformers models, the model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`]. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuration. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. """ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model def _setup_prompt_encoder(self, adapter_name: str): config = self.peft_config[adapter_name] if not hasattr(self, "prompt_encoder"): self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if transformer_backbone is None: transformer_backbone = self.base_model if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] # the actual unsharded shape is stored in "ds_shape" attribute # special handling is needed in case the model is initialized in 
deepspeed.zero.Init() context or HfDeepSpeedConfig # has been called before # For reference refer to issue: https://github.com/huggingface/peft/issues/996 deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) if value.shape[0] == self.base_model.config.vocab_size or ( deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size ): self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") prompt_encoder = prompt_encoder.to(self.device) self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): r""" Prepares the model for gradient checkpointing if necessary """ if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: """ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning method. """ prompt_encoder = self.prompt_encoder[adapter_name] prompt_tokens = ( self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) ) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) else: prompt_embeddings = prompt_encoder(prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor: """ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = ( self.prompt_tokens[self.active_adapter] .unsqueeze(0) .expand(batch_size, -1) .to(prompt_encoder.embedding.weight.device) ) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) if self.base_model_torch_dtype is not None: past_key_values = past_key_values.to(self.base_model_torch_dtype) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompts = prompt_encoder(prompt_tokens, task_ids) else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def get_nb_trainable_parameters(self) -> tuple[int, int]: r""" Returns the number of trainable parameters and the number of all parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self) -> None: """ Prints the number of trainable parameters in the model. Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number of trainable parameters of the backbone transformer model which can be different. 
""" trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) def _get_base_model_class(self, is_prompt_tuning=False): """ Returns the base model class. """ if not is_prompt_tuning: return self.base_model.model.__class__ return self.base_model.__class__ def disable_adapter(self): """ Context manager that disables the adapter module. Use this to run inference on the base model. Example: ```py >>> with model.disable_adapter(): ... model(inputs) ``` """ try: if self.peft_config[self.active_adapter].is_prompt_learning: # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and # letting the underlying methods deal with it, same as how LoRA does it. old_forward = self.forward self.forward = self.base_model.forward old_prepare_inputs_for_generation = self.prepare_inputs_for_generation self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation else: self.base_model.disable_adapter_layers() yield finally: if self.peft_config[self.active_adapter].is_prompt_learning: self.forward = old_forward self.prepare_inputs_for_generation = old_prepare_inputs_for_generation else: self.base_model.enable_adapter_layers() def get_base_model(self) -> torch.nn.Module: """ Returns the base model. """ return ( self.base_model if (self.active_peft_config.is_prompt_learning or self.peft_type == PeftType.POLY) else self.base_model.model ) def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None: """ Add an adapter to the model based on the passed configuration. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. """ if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." 
) try: if peft_config.is_prompt_learning: self.peft_config[adapter_name] = peft_config if hasattr(self.config, "to_dict"): dict_config = self.config.to_dict() else: dict_config = self.config peft_config = _prepare_prompt_learning_config(peft_config, dict_config) self._setup_prompt_encoder(adapter_name) elif peft_config.is_adaption_prompt: self.base_model.add_adapter(adapter_name, peft_config) else: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self.base_model.model, adapter_name) except Exception: # something went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) def _split_kwargs(cls, kwargs: dict[str, Any]): _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) hf_hub_download_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: hf_hub_download_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, other_kwargs def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): """ Load a trained adapter into the model. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. kwargs: (`optional`): Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. 
""" from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) torch_device = infer_device() if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, **hf_hub_download_kwargs, ) ].from_pretrained( model_id, **hf_hub_download_kwargs, ) if peft_config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) # load the weights into the model load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if self.peft_config[adapter_name].is_prompt_learning: remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default if not is_trainable: self.eval() return load_result def set_adapter(self, adapter_name: str) -> None: """ Sets the active adapter. Only one adapter can be active at a time. Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str`): The name of the adapter to be set as active. The adapter must be loaded first. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not self.peft_config[adapter_name].is_prompt_learning: self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) def base_model_torch_dtype(self): return getattr(self.base_model, "dtype", None) def active_peft_config(self): return self.peft_config[self.active_adapter] def create_or_update_model_card(self, output_dir: str): """ Updates or create model card to include information about peft: 1. Adds `peft` library tag 2. Adds peft version 3. Adds base model info 4. 
Adds quantization information if it was used """ filename = os.path.join(output_dir, "README.md") card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) card.data["library_name"] = "peft" model_config = getattr(self, "config", None) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() if model_config is not None and "_name_or_path" in model_config: card.data["base_model"] = model_config["_name_or_path"] lines = card.text.splitlines() quantization_config = None if hasattr(model_config, "quantization_config"): quantization_config = self.config.quantization_config.to_dict() training_config_text = "" quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" # Adds quantization information if it was used if quantization_config is not None: training_config_text += f"\n{quantization_prefix}\n" training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) training_config_text += "\n" training_procedure_heading = "## Training procedure" if quantization_prefix not in lines and bool(training_config_text): if training_procedure_heading in lines: lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) else: lines.append(f"{training_procedure_heading}\n{training_config_text}") # Adds peft version framework_block_heading = "### Framework versions" if f"- PEFT {__version__}" not in lines: if framework_block_heading in lines: lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") else: lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") card.text = "\n".join(lines) card.save(filename) The provided code snippet includes necessary dependencies for implementing the `get_peft_model` function. Write a Python function `def get_peft_model( model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False ) -> PeftModel | PeftMixedModel` to solve the following problem: Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). mixed (`bool`, `optional`, defaults to `False`): Whether to allow mixing different (compatible) adapter types. Here is the function: def get_peft_model( model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False ) -> PeftModel | PeftMixedModel: """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). mixed (`bool`, `optional`, defaults to `False`): Whether to allow mixing different (compatible) adapter types. 
""" model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if mixed: return PeftMixedModel(model, peft_config, adapter_name=adapter_name) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: return PeftModel(model, peft_config, adapter_name=adapter_name) if peft_config.is_prompt_learning: peft_config = _prepare_prompt_learning_config(peft_config, model_config) return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name)
Returns a Peft model object from a model and a config.

Args:
    model ([`transformers.PreTrainedModel`]): Model to be wrapped.
    peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model.
    adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
    mixed (`bool`, `optional`, defaults to `False`): Whether to allow mixing different (compatible) adapter types.
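The entry above documents the public `get_peft_model` entry point. A minimal usage sketch follows; the checkpoint name (`facebook/opt-125m`) and the LoRA hyperparameters are illustrative assumptions, not part of the entry, and the snippet requires `transformers` and `peft` to be installed.

```py
# Hedged sketch: wrap a small causal LM with a LoRA adapter via get_peft_model.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # example checkpoint
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,  # selects the task-specific PEFT model class
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
)
peft_model = get_peft_model(base_model, lora_config, adapter_name="default")
peft_model.print_trainable_parameters()  # only the injected LoRA weights are trainable
```

Because `task_type` matches an entry in `MODEL_TYPE_TO_PEFT_MODEL_MAPPING`, the call returns a task-specific subclass rather than a bare `PeftModel`.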
161,415
from __future__ import annotations from typing import TYPE_CHECKING, Any import torch from .config import PeftConfig from .mixed_model import PeftMixedModel from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import ( AdaLoraConfig, AdaLoraModel, AdaptionPromptConfig, IA3Config, IA3Model, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, LoraConfig, LoraModel, MultitaskPromptTuningConfig, OFTConfig, OFTModel, PolyConfig, PolyModel, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, ) from .utils import _prepare_prompt_learning_config PEFT_TYPE_TO_TUNER_MAPPING = { "LORA": LoraModel, "LOHA": LoHaModel, "LOKR": LoKrModel, "ADALORA": AdaLoraModel, "IA3": IA3Model, "OFT": OFTModel, "POLY": PolyModel, } class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) The provided code snippet includes necessary dependencies for implementing the `inject_adapter_in_model` function. Write a Python function `def inject_adapter_in_model( peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default" ) -> torch.nn.Module` to solve the following problem: r""" A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. Args: peft_config (`PeftConfig`): Configuration object containing the parameters of the Peft model. model (`torch.nn.Module`): The input model where the adapter will be injected. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). Here is the function: def inject_adapter_in_model( peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default" ) -> torch.nn.Module: r""" A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. Args: peft_config (`PeftConfig`): Configuration object containing the parameters of the Peft model. model (`torch.nn.Module`): The input model where the adapter will be injected. 
adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). """ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt: raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.") if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys(): raise ValueError( f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`." ) tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules. peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name) return peft_model.model
r""" A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. Args: peft_config (`PeftConfig`): Configuration object containing the parameters of the Peft model. model (`torch.nn.Module`): The input model where the adapter will be injected. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default").
161,416
from contextlib import contextmanager import packaging.version import torch import transformers The provided code snippet includes necessary dependencies for implementing the `gather_params_ctx` function. Write a Python function `def gather_params_ctx(module: torch.nn.Module, modifier_rank: int = 0)` to solve the following problem: Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing. Here is the function: def gather_params_ctx(module: torch.nn.Module, modifier_rank: int = 0): """Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing.""" if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"): from transformers.integrations import is_deepspeed_zero3_enabled else: from transformers.deepspeed import is_deepspeed_zero3_enabled if not is_deepspeed_zero3_enabled(): yield return import deepspeed params_to_gather = module.parameters() with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=modifier_rank): yield return
Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing.
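A small sketch of how this context manager is typically used: outside a DeepSpeed ZeRO-3 run it is a no-op, so the weight can be read directly; under ZeRO-3 the sharded parameters would be gathered first. The import path is an assumption based on the module shown above.

```py
# Hedged sketch: read a full weight tensor inside the gathering context.
import torch
from peft.utils.integrations import gather_params_ctx  # assumed import path

layer = torch.nn.Linear(8, 8)
with gather_params_ctx(layer):
    full_weight = layer.weight.data.clone()
print(full_weight.shape)  # torch.Size([8, 8])
```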
161,417
from contextlib import contextmanager import packaging.version import torch import transformers The provided code snippet includes necessary dependencies for implementing the `dequantize_bnb_weight` function. Write a Python function `def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None)` to solve the following problem: Helper function to dequantize 4bit or 8bit bnb weights. If the weight is not a bnb quantized weight, it will be returned as is. Here is the function: def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None): """ Helper function to dequantize 4bit or 8bit bnb weights. If the weight is not a bnb quantized weight, it will be returned as is. """ if not isinstance(weight, torch.nn.Parameter): raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead") cls_name = weight.__class__.__name__ if cls_name not in ("Params4bit", "Int8Params"): return weight import bitsandbytes as bnb if cls_name == "Params4bit": return bnb.functional.dequantize_4bit(weight.data, weight.quant_state) if state.SCB is None: state.SCB = weight.SCB im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device) im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im) im, Sim = bnb.functional.transform(im, "col32") if state.CxB is None: state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB) out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB) return bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t()
Helper function to dequantize 4bit or 8bit bnb weights. If the weight is not a bnb quantized weight, it will be returned as is.
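A tiny sketch of the documented pass-through behaviour: a plain `nn.Parameter` is neither `Params4bit` nor `Int8Params`, so it is returned unchanged. The import path is an assumption based on the module shown above; the bitsandbytes branches need a CUDA setup and are not exercised here.

```py
# Hedged sketch: non-quantized weights pass through dequantize_bnb_weight untouched.
import torch
from peft.utils.integrations import dequantize_bnb_weight  # assumed import path

w = torch.nn.Parameter(torch.randn(4, 4))
assert dequantize_bnb_weight(w) is w
```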
161,418
import os import warnings from typing import Optional import torch from huggingface_hub import file_exists, hf_hub_download from huggingface_hub.utils import EntryNotFoundError from safetensors.torch import load_file as safe_load_file from .other import ( EMBEDDING_LAYER_NAMES, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, check_file_exists_on_hf_hub, infer_device, ) from .peft_types import PeftType def has_valid_embedding_base_layer(layer): """Check if the layer has an embedding base layer""" return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding)) def get_embedding_layer_name(model, layer, is_embedding_in_target_modules): """Get the name of the embedding module for a given layer.""" for name, module in model.named_modules(): if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None): return name return None def check_file_exists_on_hf_hub(repo_id: str, filename: str, **kwargs) -> Optional[bool]: """Check if a file exists on HF Hub, if check was not successful returns None instead of erroring. Respect offline mode if set. """ exists: Optional[bool] = None if str_to_bool(os.environ.get("HF_HUB_OFFLINE", "0")): # user set offline mode, cannot check return exists try: exists = file_exists(repo_id, filename, **kwargs) except (HFValidationError, EntryNotFoundError): # error, exists stays None pass except Exception as e: warnings.warn( f"Unable to fetch remote file due to the following error {e} - silently ignoring the lookup" f" for the file {filename} in {repo_id}." ) return exists class PeftType(str, enum.Enum): """ Enum class for the different types of adapters in PEFT. Supported PEFT types: - PROMPT_TUNING - MULTITASK_PROMPT_TUNING - P_TUNING - PREFIX_TUNING - LORA - ADALORA - ADAPTION_PROMPT - IA3 - LOHA - LOKR - OFT """ PROMPT_TUNING = "PROMPT_TUNING" MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING" P_TUNING = "P_TUNING" PREFIX_TUNING = "PREFIX_TUNING" LORA = "LORA" ADALORA = "ADALORA" ADAPTION_PROMPT = "ADAPTION_PROMPT" IA3 = "IA3" LOHA = "LOHA" LOKR = "LOKR" OFT = "OFT" POLY = "POLY" The provided code snippet includes necessary dependencies for implementing the `get_peft_model_state_dict` function. Write a Python function `def get_peft_model_state_dict( model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto" )` to solve the following problem: Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the passed model will be used. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be returned. unwrap_compiled (`bool`, *optional*, defaults to `False`): Whether to unwrap the model if torch.compile was used. save_embedding_layers (`Union[bool, str]`, , *optional*, defaults to `auto`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. Based on it sets the boolean flag. This only works for 🤗 transformers models. 
Here is the function: def get_peft_model_state_dict( model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto" ): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the passed model will be used. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be returned. unwrap_compiled (`bool`, *optional*, defaults to `False`): Whether to unwrap the model if torch.compile was used. save_embedding_layers (`Union[bool, str]`, , *optional*, defaults to `auto`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. Based on it sets the boolean flag. This only works for 🤗 transformers models. """ if unwrap_compiled: model = getattr(model, "_orig_mod", model) config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict() if config.peft_type in (PeftType.LORA, PeftType.ADALORA): # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} for k in state_dict: if "lora_" in k: to_return[k] = state_dict[k] bias_name = k.split("lora_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} config.rank_pattern = rank_pattern to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) elif config.peft_type == PeftType.LOHA: to_return = {k: state_dict[k] for k in state_dict if "hada_" in k} elif config.peft_type == PeftType.LOKR: to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k} elif config.peft_type == PeftType.ADAPTION_PROMPT: to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} elif config.is_prompt_learning: to_return = {} if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: if config.inference_mode: prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) to_return["prompt_embeddings"] = prompt_embeddings elif config.peft_type == PeftType.IA3: to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k} elif config.peft_type == PeftType.OFT: to_return 
= {k: state_dict[k] for k in state_dict if "oft_" in k} elif config.peft_type == PeftType.POLY: to_return = {k: state_dict[k] for k in state_dict if "poly_" in k} else: raise NotImplementedError if getattr(model, "modules_to_save", None) is not None: for key, value in state_dict.items(): if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save): to_return[key.replace("modules_to_save.", "")] = value # check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary is_embedding_in_target_modules = False if ( save_embedding_layers == "auto" and hasattr(config, "target_modules") and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES) ): warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.") save_embedding_layers = is_embedding_in_target_modules = True elif save_embedding_layers == "auto": vocab_size = getattr(getattr(model, "config", None), "vocab_size", None) model_id = getattr(config, "base_model_name_or_path", None) # For some models e.g. diffusers the text config file is stored in a subfolder # we need to make sure we can download that config. has_remote_config = False # ensure that this check is not performed in HF offline mode, see #1452 if model_id is not None: exists = check_file_exists_on_hf_hub(model_id, "config.json") if exists is None: # check failed, could not determine if it exists or not warnings.warn( f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified." ) has_remote_config = False else: has_remote_config = exists # check if the vocab size of the base model is different from the vocab size of the finetuned model if ( vocab_size and model_id and has_remote_config and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size) ): warnings.warn( "Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning." ) save_embedding_layers = True else: save_embedding_layers = False if save_embedding_layers and hasattr(model, "get_input_embeddings"): for layer in [model.get_input_embeddings(), model.get_output_embeddings()]: if not is_embedding_in_target_modules or has_valid_embedding_base_layer(layer): # support from version >= 0.6.2 embedding_module_name = get_embedding_layer_name(model, layer, is_embedding_in_target_modules) if embedding_module_name: to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k}) elif save_embedding_layers: warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.") to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()} return to_return
Get the state dict of the Peft model.

Args:
    model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module).
    state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the passed model will be used.
    adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be returned.
    unwrap_compiled (`bool`, *optional*, defaults to `False`): Whether to unwrap the model if torch.compile was used.
    save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `auto`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. Based on it sets the boolean flag. This only works for 🤗 transformers models.
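A minimal usage sketch for extracting only the adapter weights; the checkpoint name and LoRA settings are illustrative assumptions.

```py
# Hedged sketch: collect just the LoRA tensors of a PEFT-wrapped model.
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model, get_peft_model_state_dict

model = get_peft_model(
    AutoModelForCausalLM.from_pretrained("facebook/opt-125m"),  # example checkpoint
    LoraConfig(task_type=TaskType.CAUSAL_LM, r=8),
)
adapter_state = get_peft_model_state_dict(model, adapter_name="default")
print(len(adapter_state), "adapter tensors")  # LoRA A/B matrices only, no base weights
```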
161,419
import os import warnings from typing import Optional import torch from huggingface_hub import file_exists, hf_hub_download from huggingface_hub.utils import EntryNotFoundError from safetensors.torch import load_file as safe_load_file from .other import ( EMBEDDING_LAYER_NAMES, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, check_file_exists_on_hf_hub, infer_device, ) from .peft_types import PeftType class PeftType(str, enum.Enum): """ Enum class for the different types of adapters in PEFT. Supported PEFT types: - PROMPT_TUNING - MULTITASK_PROMPT_TUNING - P_TUNING - PREFIX_TUNING - LORA - ADALORA - ADAPTION_PROMPT - IA3 - LOHA - LOKR - OFT """ PROMPT_TUNING = "PROMPT_TUNING" MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING" P_TUNING = "P_TUNING" PREFIX_TUNING = "PREFIX_TUNING" LORA = "LORA" ADALORA = "ADALORA" ADAPTION_PROMPT = "ADAPTION_PROMPT" IA3 = "IA3" LOHA = "LOHA" LOKR = "LOKR" OFT = "OFT" POLY = "POLY" The provided code snippet includes necessary dependencies for implementing the `set_peft_model_state_dict` function. Write a Python function `def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default")` to solve the following problem: Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. Here is the function: def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"): """ Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. """ config = model.peft_config[adapter_name] state_dict = {} if getattr(model, "modules_to_save", None) is not None: for key, value in peft_model_state_dict.items(): if any(module_name in key for module_name in model.modules_to_save): for module_name in model.modules_to_save: if module_name in key: key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}") break state_dict[key] = value else: state_dict = peft_model_state_dict if config.peft_type in ( PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.IA3, PeftType.OFT, PeftType.POLY, ): peft_model_state_dict = {} parameter_prefix = { PeftType.IA3: "ia3_", PeftType.LORA: "lora_", PeftType.ADALORA: "lora_", PeftType.LOHA: "hada_", PeftType.LOKR: "lokr_", PeftType.OFT: "oft_", PeftType.POLY: "poly_", }[config.peft_type] for k, v in state_dict.items(): if parameter_prefix in k: suffix = k.split(parameter_prefix)[1] if "." in suffix: suffix_to_replace = ".".join(suffix.split(".")[1:]) k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") else: k = f"{k}.{adapter_name}" peft_model_state_dict[k] = v else: peft_model_state_dict[k] = v if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: peft_model_state_dict = state_dict else: raise NotImplementedError load_result = model.load_state_dict(peft_model_state_dict, strict=False) if config.is_prompt_learning: model.prompt_encoder[adapter_name].embedding.load_state_dict( {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True ) if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False) return load_result
Set the state dict of the Peft model.

Args:
    model ([`PeftModel`]): The Peft model.
    peft_model_state_dict (`dict`): The state dict of the Peft model.
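A round-trip sketch pairing this with `get_peft_model_state_dict` from the previous entry; the checkpoint name is an illustrative assumption, and the two models must share the same adapter configuration.

```py
# Hedged sketch: copy adapter weights between two identically configured PEFT models.
from transformers import AutoModelForCausalLM
from peft import (
    LoraConfig,
    TaskType,
    get_peft_model,
    get_peft_model_state_dict,
    set_peft_model_state_dict,
)

config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8)
src = get_peft_model(AutoModelForCausalLM.from_pretrained("facebook/opt-125m"), config)
dst = get_peft_model(AutoModelForCausalLM.from_pretrained("facebook/opt-125m"), config)

load_result = set_peft_model_state_dict(dst, get_peft_model_state_dict(src), adapter_name="default")
print(load_result.unexpected_keys)  # expected to be empty when the configs match
```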
161,420
import os import warnings from typing import Optional import torch from huggingface_hub import file_exists, hf_hub_download from huggingface_hub.utils import EntryNotFoundError from safetensors.torch import load_file as safe_load_file from .other import ( EMBEDDING_LAYER_NAMES, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, check_file_exists_on_hf_hub, infer_device, ) from .peft_types import PeftType def infer_device() -> str: if torch.cuda.is_available(): return "cuda" elif hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): return "mps" elif is_xpu_available(): return "xpu" elif is_npu_available(): return "npu" return "cpu" The provided code snippet includes necessary dependencies for implementing the `load_peft_weights` function. Write a Python function `def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict` to solve the following problem: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. Here is the function: def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. """ path = ( os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if hf_hub_download_kwargs.get("subfolder", None) is not None else model_id ) if device is None: device = infer_device() if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False else: token = hf_hub_download_kwargs.get("token", None) if token is None: token = hf_hub_download_kwargs.get("use_auth_token", None) hub_filename = ( os.path.join(hf_hub_download_kwargs["subfolder"], SAFETENSORS_WEIGHTS_NAME) if hf_hub_download_kwargs.get("subfolder", None) is not None else SAFETENSORS_WEIGHTS_NAME ) has_remote_safetensors_file = file_exists( repo_id=model_id, filename=hub_filename, revision=hf_hub_download_kwargs.get("revision", None), repo_type=hf_hub_download_kwargs.get("repo_type", None), token=token, ) use_safetensors = has_remote_safetensors_file if has_remote_safetensors_file: # Priority 1: load safetensors weights filename = hf_hub_download( model_id, SAFETENSORS_WEIGHTS_NAME, **hf_hub_download_kwargs, ) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) except EntryNotFoundError: raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." 
) if use_safetensors: if hasattr(torch.backends, "mps") and (device == torch.device("mps")): adapters_weights = safe_load_file(filename, device="cpu") else: adapters_weights = safe_load_file(filename, device=device) else: adapters_weights = torch.load(filename, map_location=torch.device(device)) return adapters_weights
r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub.
161,421
import logging from typing import Union import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available class NFQuantizer: def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs): super().__init__(*args, **kwargs) self.num_bits = num_bits self.device = device self.method = method self.block_size = block_size if self.method == "normal": self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) elif self.method == "uniform": self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) else: raise NotImplementedError("Other quantization methods not supported yet.") def create_uniform_map(symmetric=False, num_bits=4): if symmetric: # print("symmetric uniform quantization") negative = torch.linspace(-1, 0, 2 ** (num_bits - 1)) positive = torch.linspace(0, 1, 2 ** (num_bits - 1)) table = torch.cat([negative, positive[1:]]) else: # print("asymmetric uniform quantization") table = torch.linspace(-1, 1, 2**num_bits) return table def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2): try: from scipy.stats import norm except ImportError: raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") variations = 2**num_bits if symmetric: v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist() values = [] for index in range(len(v) - 1): values.append(0.5 * v[index] + 0.5 * v[index + 1]) v = values else: # one more positive value, this is an asymmetric type v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist() v2 = [0] v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist() v = v1 + v2 + v3 values = torch.Tensor(v) values = values.sort().values values /= values.max() return values def quantize_tensor(self, weight): max_abs = torch.abs(weight).max() weight_normed = weight / max_abs weight_normed_expanded = weight_normed.unsqueeze(-1) # Reshape L to have the same number of dimensions as X_expanded L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1) # Calculate the absolute difference between X_expanded and L_reshaped abs_diff = torch.abs(weight_normed_expanded - L_reshaped) # Find the index of the minimum absolute difference for each element qweight = torch.argmin(abs_diff, dim=-1) return qweight, max_abs def dequantize_tensor(self, qweight, max_abs): qweight_flatten = qweight.flatten() weight_normed = self.norm_lookup_table[qweight_flatten] weight = weight_normed * max_abs weight = weight.reshape(qweight.shape) return weight def quantize_block(self, weight): if len(weight.shape) != 2: raise ValueError(f"Only support 2D matrix, but your input has {len(weight.shape)} dimensions.") if weight.shape[0] * weight.shape[1] % self.block_size != 0: raise ValueError( f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) " f"is not dividable by block size {self.block_size}." 
) M, N = weight.shape device = weight.device # Quantization weight_flatten = weight.flatten() # (M*N, ) weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B if self.method == "normal": weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1) elif self.method == "uniform": weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1) else: raise NotImplementedError("Method not supported yet.") weight_max = weight_max.unsqueeze(-1) weight_divabs = weight_block / weight_max # (L, B) weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1) L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K) abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K) qweight = torch.argmin(abs_diff, dim=-1) # (L, B) # Pack multiple k-bit into uint8 qweight = qweight.reshape(-1, 8 // self.num_bits) qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device) # data format example: # [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO for i in range(8 // self.num_bits): qweight[:, i] = qweight[:, i] << i * self.num_bits qweight_pack[:, 0] |= qweight[:, i] return qweight_pack, weight_max, weight.shape def dequantize_block(self, qweight, weight_max, weight_shape): # unpack weight device = qweight.device weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device) for i in range(8 // self.num_bits): lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits # get the most right 2 bits lookup_table_idx = lookup_table_idx.to(torch.long) weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze() qweight = qweight >> self.num_bits # right shift 2 bits of the original data weight_block = weight.reshape(-1, self.block_size) weight = weight_block * weight_max weight = weight.reshape(weight_shape) return weight def _low_rank_decomposition(weight, reduced_rank=32): """ :param weight: The matrix to decompose, of shape (H, W) :param reduced_rank: the final rank :return: """ matrix_dimension = len(weight.size()) if matrix_dimension != 2: raise ValueError(f"Only support 2D matrix, but your input has {matrix_dimension} dimensions.") # Use SVD to decompose a matrix, default full_matrices is False to save parameters U, S, Vh = torch.linalg.svd(weight, full_matrices=False) L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank])) R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank} def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1): if num_bits not in [2, 4, 8]: raise ValueError("Only support 2, 4, 8 bits quantization") if num_iter <= 0: raise ValueError("Number of iterations must be greater than 0") out_feature, in_feature = weight.size() device = weight.device dtype = weight.dtype logging.info( f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} " f"| Num Iter: {num_iter} | Num Bits: {num_bits}" ) if not is_bnb_4bit_available() or num_bits in [2, 8]: quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64) compute_device = device else: compute_device = "cuda" weight = weight.to(device=compute_device, dtype=torch.float32) res = weight.clone() for i in range(num_iter): torch.cuda.empty_cache() # Quantization if num_bits == 4 and is_bnb_4bit_available(): qweight = bnb.nn.Params4bit( 
res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4" ).to(compute_device) dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state) else: quantized_weight, max_abs, shape = quantizer.quantize_block(res) dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape) res = weight - dequantized_weight # Decompose the residual by SVD output = _low_rank_decomposition(res, reduced_rank=reduced_rank) L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"] res = weight - torch.mm(L, R) lora_A, lora_B = R, L return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B
null
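The entry above contains the LoftQ initialization routine (alternating quantization of the residual with an SVD-based low-rank refit). In user code this is normally requested through `LoraConfig` rather than called directly; a hedged sketch follows, with an illustrative checkpoint name, and the 4-bit path needs a CUDA device plus bitsandbytes.

```py
# Hedged sketch: ask PEFT to initialize LoRA weights with LoftQ while attaching the adapter.
from transformers import AutoModelForCausalLM
from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model

loftq_config = LoftQConfig(loftq_bits=4, loftq_iter=1)  # 4-bit quantization, one refinement pass
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=16,
    init_lora_weights="loftq",
    loftq_config=loftq_config,
)
model = get_peft_model(AutoModelForCausalLM.from_pretrained("facebook/opt-125m"), lora_config)
```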
161,422
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None): r""" Note this method only works for `transformers` models. This method wraps the entire protocol for preparing a model before running a training. This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model (`transformers.PreTrainedModel`): The loaded model from `transformers` use_gradient_checkpointing (`bool`, *optional*, defaults to `True`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of `torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method. Note this is only available in the latest transformers versions (> 4.34.1). 
""" loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq" is_aqlm_quantized = getattr(model, "quantization_method", None) == "aqlm" if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} for name, param in model.named_parameters(): # freeze base model's layers param.requires_grad = False if not is_gptq_quantized and not is_aqlm_quantized: # cast all non INT8 parameters to fp32 for param in model.parameters(): if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if (loaded_in_kbit or is_gptq_quantized or is_aqlm_quantized) and use_gradient_checkpointing: # When having `use_reentrant=False` + gradient_checkpointing, there is no need for this hack if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # To support older transformers versions, check if the model supports gradient_checkpointing_kwargs _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(model.gradient_checkpointing_enable).parameters ) if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0: warnings.warn( "gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored." " if you want to use that feature, please upgrade to the latest version of transformers.", FutureWarning, ) gc_enable_kwargs = ( {} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs} ) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable(**gc_enable_kwargs) return model def prepare_model_for_int8_training(*args, **kwargs): warnings.warn( "prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.", FutureWarning, ) return prepare_model_for_kbit_training(*args, **kwargs)
null
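A typical QLoRA-style preparation flow as a hedged sketch; the checkpoint name and LoRA settings are illustrative, and the 4-bit load requires bitsandbytes and a CUDA device.

```py
# Hedged sketch: load a model in 4 bit, prepare it for k-bit training, then attach LoRA.
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, TaskType, get_peft_model, prepare_model_for_kbit_training

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m",  # example checkpoint
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
model = prepare_model_for_kbit_training(
    model,
    use_gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": False},
)
model = get_peft_model(model, LoraConfig(task_type=TaskType.CAUSAL_LM, r=8))
```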
161,423
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) The provided code snippet includes necessary dependencies for implementing the `shift_tokens_right` function. Write a Python function `def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int)` to solve the following problem: Shift input ids one token to the right. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids pad_token_id (`int`): The id of the `padding` token. decoder_start_token_id (`int`): The id of the `start` token. Here is the function: def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids pad_token_id (`int`): The id of the `padding` token. decoder_start_token_id (`int`): The id of the `start` token. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids
Shift input ids one token to the right.

Args:
    input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids
    pad_token_id (`int`): The id of the `padding` token.
    decoder_start_token_id (`int`): The id of the `start` token.
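A small worked example of the shift, assuming `shift_tokens_right` as defined in the entry above is in scope (it is an internal helper rather than public peft API): the start token is prepended, the last position is dropped, and `-100` label-ignore values become the pad token id.

```py
# Worked example (function assumed in scope from the snippet above).
import torch

input_ids = torch.tensor([[5, -100, 7, 8]])
shifted = shift_tokens_right(input_ids, pad_token_id=0, decoder_start_token_id=2)
print(shifted)  # tensor([[2, 5, 0, 7]]): start token prepended, -100 replaced by pad id
```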
161,424
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def _freeze_adapter(model, adapter_name): for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False
null
161,425
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) class ModulesToSaveWrapper(torch.nn.Module): def __init__(self, module_to_save, adapter_name): super().__init__() self.original_module = module_to_save self.modules_to_save = torch.nn.ModuleDict({}) self._active_adapter = adapter_name self._disable_adapters = False self.update(adapter_name) self.check_module() def check_module(self): """Perform some sanity checks on the module to ensure that it works""" # Try to anticipate some modules that users could try to target that would not work. # Note: It's not possible to check hasattr(module, "forward"), since that returns True for ModuleDict and # ModuleList, even though their forward methods cannot be called forbidden_classes = (torch.nn.ModuleDict, torch.nn.ModuleList, torch.nn.ParameterDict, torch.nn.ParameterList) if isinstance(self.original_module, forbidden_classes): cls_name = self.original_module.__class__.__name__ raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}") def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def weight(self): if self.active_adapter not in self.modules_to_save: return self.original_module.weight return self.modules_to_save[self.active_adapter].weight def update(self, adapter_name): context_manager = nullcontext() for _, param in self.original_module.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): import deepspeed context_manager = deepspeed.zero.GatheredParameters(self.original_module.parameters(), modifier_rank=0) break with context_manager: self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): old_hook = self.modules_to_save[adapter_name]._hf_hook new_hook = self._create_new_hook(old_hook) remove_hook_from_module(self.modules_to_save[adapter_name]) add_hook_to_module(self.modules_to_save[adapter_name], new_hook) self.original_module.requires_grad_(False) if adapter_name == self.active_adapter: self.modules_to_save[adapter_name].requires_grad_(True) def _create_new_hook(self, old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! 
""" old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def forward(self, *args, **kwargs): if self.disable_adapters or (self.active_adapter not in self.modules_to_save): return self.original_module(*args, **kwargs) return self.modules_to_save[self.active_adapter](*args, **kwargs) def enable_adapters(self, enabled: bool): """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if self._disable_adapters is not enabled: # already in the desired state, do nothing return if enabled: self.original_module.requires_grad_(False) self.modules_to_save[self.active_adapter].requires_grad_(True) self._disable_adapters = False else: self.original_module.requires_grad_(True) self.modules_to_save.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_name: str): """Set the active adapter Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (str): The name of the adapter to set as active """ if adapter_name not in self.modules_to_save: raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}") self.modules_to_save[self.active_adapter].requires_grad_(False) self.modules_to_save[adapter_name].requires_grad_(True) self._active_adapter = adapter_name def _get_submodules(model, key): parent = model.get_submodule(".".join(key.split(".")[:-1])) target_name = key.split(".")[-1] target = model.get_submodule(key) return parent, target, target_name def _set_trainable(model, adapter_name): key_list = [key for key, _ in model.named_modules()] for key in key_list: target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save) if target_module_found: parent, target, target_name = _get_submodules(model, key) if isinstance(target, ModulesToSaveWrapper): target.update(adapter_name) target.set_adapter(target.active_adapter) else: new_module = ModulesToSaveWrapper(target, adapter_name) new_module.set_adapter(adapter_name) setattr(parent, target_name, new_module)
null
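From the user side, `ModulesToSaveWrapper` and `_set_trainable` are driven by the `modules_to_save` field of a PEFT config; a hedged sketch follows, where the checkpoint name and label count are illustrative assumptions.

```py
# Hedged sketch: keep a full trainable, per-adapter copy of the classification head.
from transformers import AutoModelForSequenceClassification
from peft import LoraConfig, TaskType, get_peft_model

config = LoraConfig(
    task_type=TaskType.SEQ_CLS,
    r=8,
    modules_to_save=["classifier"],  # wrapped in ModulesToSaveWrapper by _set_trainable
)
model = AutoModelForSequenceClassification.from_pretrained("roberta-base", num_labels=2)
model = get_peft_model(model, config)
model.print_trainable_parameters()  # LoRA weights plus the copied classifier head
```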
161,426
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) class ModulesToSaveWrapper(torch.nn.Module): def __init__(self, module_to_save, adapter_name): super().__init__() self.original_module = module_to_save self.modules_to_save = torch.nn.ModuleDict({}) self._active_adapter = adapter_name self._disable_adapters = False self.update(adapter_name) self.check_module() def check_module(self): """Perform some sanity checks on the module to ensure that it works""" # Try to anticipate some modules that users could try to target that would not work. # Note: It's not possible to check hasattr(module, "forward"), since that returns True for ModuleDict and # ModuleList, even though their forward methods cannot be called forbidden_classes = (torch.nn.ModuleDict, torch.nn.ModuleList, torch.nn.ParameterDict, torch.nn.ParameterList) if isinstance(self.original_module, forbidden_classes): cls_name = self.original_module.__class__.__name__ raise TypeError(f"modules_to_save cannot be applied to modules of type {cls_name}") def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def weight(self): if self.active_adapter not in self.modules_to_save: return self.original_module.weight return self.modules_to_save[self.active_adapter].weight def update(self, adapter_name): context_manager = nullcontext() for _, param in self.original_module.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): import deepspeed context_manager = deepspeed.zero.GatheredParameters(self.original_module.parameters(), modifier_rank=0) break with context_manager: self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): old_hook = self.modules_to_save[adapter_name]._hf_hook new_hook = self._create_new_hook(old_hook) remove_hook_from_module(self.modules_to_save[adapter_name]) add_hook_to_module(self.modules_to_save[adapter_name], new_hook) self.original_module.requires_grad_(False) if adapter_name == self.active_adapter: self.modules_to_save[adapter_name].requires_grad_(True) def _create_new_hook(self, old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! 
""" old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def forward(self, *args, **kwargs): if self.disable_adapters or (self.active_adapter not in self.modules_to_save): return self.original_module(*args, **kwargs) return self.modules_to_save[self.active_adapter](*args, **kwargs) def enable_adapters(self, enabled: bool): """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if self._disable_adapters is not enabled: # already in the desired state, do nothing return if enabled: self.original_module.requires_grad_(False) self.modules_to_save[self.active_adapter].requires_grad_(True) self._disable_adapters = False else: self.original_module.requires_grad_(True) self.modules_to_save.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_name: str): """Set the active adapter Additionally, this function will set the specified adapter to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (str): The name of the adapter to set as active """ if adapter_name not in self.modules_to_save: raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}") self.modules_to_save[self.active_adapter].requires_grad_(False) self.modules_to_save[adapter_name].requires_grad_(True) self._active_adapter = adapter_name def _set_adapter(model, adapter_name): def check_adapter_name(adapter_name): if isinstance(adapter_name, str): return adapter_name # adapter_name is a list of str if len(adapter_name) > 1: raise ValueError("Only one adapter can be set at a time for modules_to_save") elif len(adapter_name) == 0: raise ValueError("Please specify at least one adapter to set") adapter_name = adapter_name[0] return adapter_name for module in model.modules(): if isinstance(module, ModulesToSaveWrapper): # only check the adapter_name if we actually encounter a ModulesToSaveWrapper, otherwise we don't care adapter_name = check_adapter_name(adapter_name) module.set_adapter(adapter_name)
null
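`_set_adapter` itself is internal; it is reached through `PeftModel.set_adapter` once several adapters are loaded. A hedged sketch with placeholder adapter paths:

```py
# Hedged sketch: switch the active adapter; placeholder paths stand in for saved adapters.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # example checkpoint
model = PeftModel.from_pretrained(base, "path/to/adapter-a", adapter_name="a")
model.load_adapter("path/to/adapter-b", adapter_name="b")

model.set_adapter("b")       # "b" becomes active (and trainable), including in any ModulesToSaveWrapper copies
print(model.active_adapter)  # prints "b"
```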
161,427
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", peft_config.token_dim) return peft_config
null
161,428
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def fsdp_auto_wrap_policy(model): import functools import os from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder default_transformer_cls_names_to_wrap = ( ",".join(model._no_split_modules) if getattr(model, "_no_split_modules", None) is not None else "" ) transformer_cls_names_to_wrap = os.environ.get( "FSDP_TRANSFORMER_CLS_TO_WRAP", default_transformer_cls_names_to_wrap ).split(",") transformer_cls_to_wrap = {PrefixEncoder, PromptEncoder, PromptEmbedding} for layer_class in transformer_cls_names_to_wrap: transformer_cls = FullyShardedDataParallelPlugin.get_module_class_from_name(model, layer_class) if transformer_cls is None: raise Exception("Could not find the transformer layer class to wrap in the model.") else: transformer_cls_to_wrap.add(transformer_cls) def lambda_policy_fn(module): if ( len(list(module.named_children())) == 0 and getattr(module, "weight", None) is not None and module.weight.requires_grad ): return True return False lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn) transformer_wrap_policy = functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=transformer_cls_to_wrap, ) auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy]) return auto_wrap_policy
null
161,429
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def transpose(weight, fan_in_fan_out): if not fan_in_fan_out: return weight if isinstance(weight, torch.nn.Parameter): return torch.nn.Parameter(weight.T) return weight.T
null
161,430
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) The provided code snippet includes necessary dependencies for implementing the `_is_valid_match` function. Write a Python function `def _is_valid_match(key: str, target_key: str)` to solve the following problem: Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key or the target_key is a submodule of key Here is the function: def _is_valid_match(key: str, target_key: str): """ Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key or the target_key is a submodule of key """ if key.endswith(target_key): if len(key) > len(target_key): return key.endswith("." + target_key) # must be a sub module return True return False
Helper function to match the module names target_key and key. Makes sure that either the key is exactly the target_key or that the target_key is a submodule of key.
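A few illustrative checks of the matching rule (hypothetical module names; assumes `_is_valid_match` as defined above is in scope):

```python
# Exact match: the key is the target itself.
assert _is_valid_match("q_proj", "q_proj")

# Suffix match on a module boundary: "q_proj" is a submodule of the key.
assert _is_valid_match("model.layers.0.self_attn.q_proj", "q_proj")

# No match: the key merely ends with the same characters, not a whole submodule name.
assert not _is_valid_match("model.layers.0.self_attn.wq_proj", "q_proj")
```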
161,431
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) The provided code snippet includes necessary dependencies for implementing the `_get_batch_size` function. Write a Python function `def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int` to solve the following problem: Get the batch size based on either input_ids or input_embeds Raises an ValueError if both are None. Here is the function: def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int: """Get the batch size based on either input_ids or input_embeds Raises an ValueError if both are None. """ if (input_ids is None) and (inputs_embeds is None): raise ValueError("You have to provide either input_ids or inputs_embeds") if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] return batch_size
Get the batch size based on either input_ids or inputs_embeds. Raises a ValueError if both are None.
161,432
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) The provided code snippet includes necessary dependencies for implementing the `get_quantization_config` function. Write a Python function `def get_quantization_config(model: torch.nn.Module, method: str)` to solve the following problem: Get the quantization config of the related quantization method Here is the function: def get_quantization_config(model: torch.nn.Module, method: str): """ Get the quantization config of the related quantization method """ if ( hasattr(model, "config") and hasattr(model.config, "quantization_config") and (getattr(model, "quantization_method", None) == method) ): return model.config.quantization_config return None
Get the quantization config of the related quantization method
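A minimal usage sketch, assuming `model` is a Transformers model loaded with GPTQ quantization; the "gptq" method string and the pairing with the following helper are illustrative, not part of the record above:

```python
# Returns the model's quantization config only when it was quantized with the given
# method, otherwise None.
gptq_quantization_config = get_quantization_config(model, method="gptq")

if gptq_quantization_config is not None:
    # Resolve the matching QuantLinear class with the helper shown in the next record.
    AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)
```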
161,433
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def is_auto_gptq_available(): if importlib.util.find_spec("auto_gptq") is not None: AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: return True else: raise ImportError( f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" ) The provided code snippet includes necessary dependencies for implementing the `get_auto_gptq_quant_linear` function. Write a Python function `def get_auto_gptq_quant_linear(gptq_quantization_config)` to solve the following problem: Get the right AutoGPTQQuantLinear class based on the quantization config file Here is the function: def get_auto_gptq_quant_linear(gptq_quantization_config): """ Get the right AutoGPTQQuantLinear class based on the quantization config file """ if gptq_quantization_config is not None and is_auto_gptq_available(): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear desc_act = gptq_quantization_config.desc_act group_size = gptq_quantization_config.group_size bits = gptq_quantization_config.bits if hasattr(gptq_quantization_config, "use_exllama"): use_exllama = gptq_quantization_config.use_exllama else: use_exllama = not gptq_quantization_config.disable_exllama if hasattr(gptq_quantization_config, "exllama_config"): exllama_version = gptq_quantization_config.exllama_config["version"] else: exllama_version = 1 AutoGPTQQuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=desc_act, group_size=group_size, bits=bits, disable_exllama=not (use_exllama and exllama_version == 1), disable_exllamav2=not (use_exllama and exllama_version == 2), ) return AutoGPTQQuantLinear return None
Get the right AutoGPTQQuantLinear class based on the quantization config file
161,434
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False The provided code snippet includes necessary dependencies for implementing the `id_tensor_storage` function. Write a Python function `def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]` to solve the following problem: Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. This method is the exact same copy of https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added it here manually to avoid import issue with old versions of transformers. Here is the function: def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. This method is the exact same copy of https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added it here manually to avoid import issue with old versions of transformers. """ if tensor.device.type == "xla" and is_torch_tpu_available(): # NOTE: xla tensors dont have storage # use some other unique id to distinguish. # this is a XLA tensor, it must be created using torch_xla's # device. So the following import is safe: import torch_xla unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) else: unique_id = storage_ptr(tensor) return tensor.device, unique_id, storage_size(tensor)
Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. This method is the exact same copy of https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added it here manually to avoid import issue with old versions of transformers.
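A small self-contained check of the identifier's behaviour on CPU (assumes the function above is in scope):

```python
import torch

base = torch.zeros(4)
view = base.view(2, 2)   # shares the underlying storage of `base`
clone = base.clone()     # owns a separate storage

# Tensors that share storage map to the same (device, storage id, storage size) triple.
assert id_tensor_storage(base) == id_tensor_storage(view)
assert id_tensor_storage(base) != id_tensor_storage(clone)
```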
161,435
import copy import inspect import os import warnings from contextlib import nullcontext from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from huggingface_hub import file_exists from huggingface_hub.utils import EntryNotFoundError, HFValidationError from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( CONFIG_NAME, EMBEDDING_LAYER_NAMES, INCLUDE_LINEAR_LAYERS_SHORTHAND, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) The provided code snippet includes necessary dependencies for implementing the `cast_mixed_precision_params` function. Write a Python function `def cast_mixed_precision_params(model, dtype)` to solve the following problem: Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. The trainable parameters are cast to full precision. This is meant to reduce the GPU memory usage when using PEFT methods by using half-precision dtype for non-trainable parameters. Having the trainable parameters in full-precision preserves training stability when using automatic mixed-precision training. Args: model (`torch.nn.Module`): The model to cast the non-trainable parameters of. dtype (`torch.dtype`): The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. Here is the function: def cast_mixed_precision_params(model, dtype): """ Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. The trainable parameters are cast to full precision. This is meant to reduce the GPU memory usage when using PEFT methods by using half-precision dtype for non-trainable parameters. Having the trainable parameters in full-precision preserves training stability when using automatic mixed-precision training. Args: model (`torch.nn.Module`): The model to cast the non-trainable parameters of. dtype (`torch.dtype`): The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. """ for p in model.parameters(): if not p.requires_grad: p.data = p.to(dtype) else: p.data = p.to(torch.float32)
Cast all non-trainable parameters of the model to the given `dtype`. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing. The trainable parameters are cast to full precision. This is meant to reduce the GPU memory usage when using PEFT methods by using half-precision dtype for non-trainable parameters. Having the trainable parameters in full-precision preserves training stability when using automatic mixed-precision training. Args: model (`torch.nn.Module`): The model to cast the non-trainable parameters of. dtype (`torch.dtype`): The dtype to cast the non-trainable parameters to. The `dtype` can be `torch.float16` or `torch.bfloat16` as per the mixed-precision training you are performing.
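A tiny self-contained check of the casting behaviour (the toy `nn.Linear` stands in for a PEFT model whose base weights are frozen; assumes the function above is in scope):

```python
import torch
import torch.nn as nn

model = nn.Linear(4, 4)
model.weight.requires_grad_(False)           # pretend the base weight is frozen

cast_mixed_precision_params(model, dtype=torch.bfloat16)

assert model.weight.dtype == torch.bfloat16  # frozen -> half precision
assert model.bias.dtype == torch.float32     # trainable -> full precision
```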
161,436
import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights The provided code snippet includes necessary dependencies for implementing the `task_arithmetic` function. Write a Python function `def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor` to solve the following problem: Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. Returns: `torch.Tensor`: The merged tensor. Here is the function: def task_arithmetic(task_tensors: List[torch.Tensor], weights: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. Returns: `torch.Tensor`: The merged tensor. """ task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors
Merge the task tensors using `task arithmetic`. Args: task_tensors (`List[torch.Tensor]`): The task tensors to merge. weights (`torch.Tensor`): The weights of the task tensors. Returns: `torch.Tensor`: The merged tensor.
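A tiny worked example, assuming the function above is in scope: with two task tensors and equal weights, the merged tensor is simply their weighted sum.

```python
import torch

t1 = torch.tensor([1.0, 2.0, 3.0])
t2 = torch.tensor([3.0, 0.0, -1.0])

merged = task_arithmetic([t1, t2], weights=torch.tensor([0.5, 0.5]))
assert torch.allclose(merged, torch.tensor([2.0, 1.0, 1.0]))  # 0.5 * t1 + 0.5 * t2
```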
161,437
import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") The provided code snippet includes necessary dependencies for implementing the `magnitude_prune` function. Write a Python function `def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor` to solve the following problem: Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. Here is the function: def magnitude_prune(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `task arithmetic`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors
Merge the task tensors using magnitude pruning followed by weighted `task arithmetic`. Args: task_tensors (`List[torch.Tensor]`): The task tensors to merge. weights (`torch.Tensor`): The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor.
161,438
import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") def calculate_majority_sign_mask( tensor: torch.Tensor, method: Literal["total", "frequency"] = "total" ) -> torch.Tensor: """ Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0. Args: tensor (`torch.Tensor`):The tensor to get the mask from. method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The majority sign mask. """ sign = tensor.sign() if method == "total": sign_magnitude = tensor.sum(dim=0) elif method == "frequency": sign_magnitude = sign.sum(dim=0) else: raise RuntimeError(f'Unimplemented mask method "{method}"') majority_sign = torch.where(sign_magnitude >= 0, 1, -1) return sign == majority_sign def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using disjoint merge. Args: task_tensors (`torch.Tensor`):The task tensors to merge. majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors. Returns: `torch.Tensor`: The merged tensor. """ mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0) num_params_preserved = majority_sign_mask.sum(dim=0) return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0) The provided code snippet includes necessary dependencies for implementing the `ties` function. Write a Python function `def ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor` to solve the following problem: Merge the task tensors using `ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. 
Here is the function: def ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="magnitude") for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors
Merge the task tensors using `ties`. Args: task_tensors (`List[torch.Tensor]`): The task tensors to merge. weights (`torch.Tensor`): The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor.
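A short usage sketch, assuming the helpers defined in the record above (`prune`, `calculate_majority_sign_mask`, `disjoint_merge`) are in scope; the tensors and the density value are illustrative:

```python
import torch

task_tensors = [
    torch.tensor([0.9, -0.1, 0.4]),
    torch.tensor([0.8, 0.7, -0.3]),
]

# Prune each task tensor to ~66% density by magnitude, elect a majority sign per position,
# then merge only the entries whose sign agrees with the elected one.
merged = ties(task_tensors, weights=torch.ones(2), density=0.66, majority_sign_method="total")
```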
161,439
import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") The provided code snippet includes necessary dependencies for implementing the `dare_linear` function. Write a Python function `def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor` to solve the following problem: Merge the task tensors using `dare linear`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. Here is the function: def dare_linear(task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float) -> torch.Tensor: """ Merge the task tensors using `dare linear`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights mixed_task_tensors = weighted_task_tensors.sum(dim=0) return mixed_task_tensors
Merge the task tensors using `dare linear`. Args: task_tensors (`List[torch.Tensor]`): The task tensors to merge. weights (`torch.Tensor`): The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. Returns: `torch.Tensor`: The merged tensor.
161,440
import warnings from typing import List, Literal import torch def reshape_weight_task_tensors(task_tensors, weights): """ Reshapes `weights` to match the shape of `task_tensors` by unsqeezing in the remaining dimenions. Args: task_tensors (`torch.Tensor`): The tensors that will be used to reshape `weights`. weights (`torch.Tensor`): The tensor to be reshaped. Returns: `torch.Tensor`: The reshaped tensor. """ new_shape = weights.shape + (1,) * (task_tensors.dim() - weights.dim()) weights = weights.view(new_shape) return weights def prune( tensor: torch.Tensor, density: float, method: Literal["magnitude", "random"], rescale: bool = False ) -> torch.Tensor: """ Prune the values of task tensors based on the `method`. Args: tensor (`torch.Tensor`):The tensor to prune. density (`float`):The fraction of values to preserve. Should be in [0,1]. method (`str`):The method to use to prune. Should be one of ["magnitude", "random"]. rescale (`bool`):Whether to rescale the result to preserve the expected value of the original tensor. Returns: `torch.Tensor`: The pruned tensor. """ if density >= 1: warnings.warn(f"The density {density} is greater than or equal to 1, no pruning will be performed.") return tensor elif density < 0: raise ValueError(f"Density should be >= 0, got {density}") if method == "magnitude": return magnitude_based_pruning(tensor, density) elif method == "random": return random_pruning(tensor, density, rescale=rescale) else: raise ValueError(f"Unknown method {method}") def calculate_majority_sign_mask( tensor: torch.Tensor, method: Literal["total", "frequency"] = "total" ) -> torch.Tensor: """ Get the mask of the majority sign across the task tensors. Task tensors are stacked on dimension 0. Args: tensor (`torch.Tensor`):The tensor to get the mask from. method (`str`):The method to use to get the mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The majority sign mask. """ sign = tensor.sign() if method == "total": sign_magnitude = tensor.sum(dim=0) elif method == "frequency": sign_magnitude = sign.sum(dim=0) else: raise RuntimeError(f'Unimplemented mask method "{method}"') majority_sign = torch.where(sign_magnitude >= 0, 1, -1) return sign == majority_sign def disjoint_merge(task_tensors: torch.Tensor, majority_sign_mask: torch.Tensor) -> torch.Tensor: """ Merge the task tensors using disjoint merge. Args: task_tensors (`torch.Tensor`):The task tensors to merge. majority_sign_mask (`torch.Tensor`):The mask of the majority sign across the task tensors. Returns: `torch.Tensor`: The merged tensor. """ mixed_task_tensors = (task_tensors * majority_sign_mask).sum(dim=0) num_params_preserved = majority_sign_mask.sum(dim=0) return mixed_task_tensors / torch.clamp(num_params_preserved, min=1.0) The provided code snippet includes necessary dependencies for implementing the `dare_ties` function. Write a Python function `def dare_ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor` to solve the following problem: Merge the task tensors using `dare ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. 
Here is the function: def dare_ties( task_tensors: List[torch.Tensor], weights: torch.Tensor, density: float, majority_sign_method: Literal["total", "frequency"] = "total", ) -> torch.Tensor: """ Merge the task tensors using `dare ties`. Args: task_tensors(`List[torch.Tensor]`):The task tensors to merge. weights (`torch.Tensor`):The weights of the task tensors. density (`float`):The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor. """ # sparsify task_tensors = [prune(tensor, density, method="random", rescale=True) for tensor in task_tensors] task_tensors = torch.stack(task_tensors, dim=0) # Elect Sign majority_sign_mask = calculate_majority_sign_mask(task_tensors, method=majority_sign_method) # weighted task tensors weights = reshape_weight_task_tensors(task_tensors, weights) weighted_task_tensors = task_tensors * weights # Disjoint Merge mixed_task_tensors = disjoint_merge(weighted_task_tensors, majority_sign_mask) return mixed_task_tensors
Merge the task tensors using `dare ties`. Args: task_tensors (`List[torch.Tensor]`): The task tensors to merge. weights (`torch.Tensor`): The weights of the task tensors. density (`float`): The fraction of values to preserve. Should be in [0,1]. majority_sign_method (`str`): The method to use to get the majority sign mask. Should be one of ["total", "frequency"]. Returns: `torch.Tensor`: The merged tensor.
161,441
import torch def bloom_model_postprocess_past_key_value(past_key_values): past_key_values = torch.cat(past_key_values) total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape keys = past_key_values[: total_layers // 2] keys = keys.transpose(2, 3).reshape( total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens ) values = past_key_values[total_layers // 2 :] values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim) return tuple(zip(keys, values))
null
161,442
import torch def starcoder_model_postprocess_past_key_value(past_key_values): result = [] for k in past_key_values: k = k[:, :, 0] k = k.permute([1, 2, 0, 3]) k = k.reshape(*k.shape[:-2], -1) result.append(k) return tuple(result)
null
161,443
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization class LoRAInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lora_A: Optional[torch.Tensor] = None lora_B: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.lora_A is None or self.lora_B is None: raise ValueError("At least one of lora_A or lora_B is None, they must both be provided") return { f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A, f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B, } The provided code snippet includes necessary dependencies for implementing the `construct_peft_loraconfig` function. Write a Python function `def construct_peft_loraconfig(info: Dict[str, LoRAInfo], **kwargs) -> LoraConfig` to solve the following problem: Constructs LoraConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint Returns: LoraConfig: config for constructing LoRA Here is the function: def construct_peft_loraconfig(info: Dict[str, LoRAInfo], **kwargs) -> LoraConfig: """Constructs LoraConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint Returns: LoraConfig: config for constructing LoRA """ # Unpack all ranks and alphas ranks = {key: val.rank for key, val in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) lora_alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != lora_alpha, alphas.items()), key=lambda x: x[0])) config = LoraConfig( r=r, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=0.0, bias="none", init_lora_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, ) return config
Constructs LoraConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint Returns: LoraConfig: config for constructing LoRA
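A hedged sketch of how this builder might be called; the module names, ranks, and the assumption that `LoRAInfo` is a dataclass (so it accepts keyword arguments) are illustrative and not taken from the record above:

```python
info = {
    "unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_q": LoRAInfo(
        kohya_key="lora_unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_q",
        peft_key="unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_q",
        rank=8,
        alpha=8.0,
    ),
    "unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_k": LoRAInfo(
        kohya_key="lora_unet_mid_block_attentions_0_transformer_blocks_0_attn1_to_k",
        peft_key="unet.mid_block.attentions.0.transformer_blocks.0.attn1.to_k",
        rank=4,
        alpha=4.0,
    ),
}

config = construct_peft_loraconfig(info)
# r and lora_alpha take the most common rank/alpha across modules; any module whose
# rank or alpha differs is recorded in rank_pattern / alpha_pattern instead.
```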
161,444
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization class LoHaInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None hada_w1_a: Optional[torch.Tensor] = None hada_w1_b: Optional[torch.Tensor] = None hada_w2_a: Optional[torch.Tensor] = None hada_w2_b: Optional[torch.Tensor] = None hada_t1: Optional[torch.Tensor] = None hada_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.hada_w1_a is None or self.hada_w1_b is None or self.hada_w2_a is None or self.hada_w2_b is None: raise ValueError( "At least one of hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b is missing, they all must be provided" ) state_dict = { f"base_model.model.{self.peft_key}.hada_w1_a": self.hada_w1_a, f"base_model.model.{self.peft_key}.hada_w1_b": self.hada_w1_b, f"base_model.model.{self.peft_key}.hada_w2_a": self.hada_w2_a, f"base_model.model.{self.peft_key}.hada_w2_b": self.hada_w2_b, } if not ( (self.hada_t1 is None and self.hada_t2 is None) or (self.hada_t1 is not None and self.hada_t2 is not None) ): raise ValueError("hada_t1 and hada_t2 must be either both present or not present at the same time") if self.hada_t1 is not None and self.hada_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.hada_t1"] = self.hada_t1 state_dict[f"base_model.model.{self.peft_key}.hada_t2"] = self.hada_t2 return state_dict The provided code snippet includes necessary dependencies for implementing the `construct_peft_lohaconfig` function. 
Write a Python function `def construct_peft_lohaconfig(info: Dict[str, LoHaInfo], **kwargs) -> LoHaConfig` to solve the following problem: Constructs LoHaConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint Returns: LoHaConfig: config for constructing LoHA Here is the function: def construct_peft_lohaconfig(info: Dict[str, LoHaInfo], **kwargs) -> LoHaConfig: """Constructs LoHaConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint Returns: LoHaConfig: config for constructing LoHA """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any((val.hada_t1 is not None) or (val.hada_t2 is not None) for val in info.values()) config = LoHaConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, ) return config
Constructs LoHaConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint Returns: LoHaConfig: config for constructing LoHA
161,445
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization class LoKrInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lokr_w1: Optional[torch.Tensor] = None lokr_w1_a: Optional[torch.Tensor] = None lokr_w1_b: Optional[torch.Tensor] = None lokr_w2: Optional[torch.Tensor] = None lokr_w2_a: Optional[torch.Tensor] = None lokr_w2_b: Optional[torch.Tensor] = None lokr_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if (self.lokr_w1 is None) and ((self.lokr_w1_a is None) or (self.lokr_w1_b is None)): raise ValueError("Either lokr_w1 or both lokr_w1_a and lokr_w1_b should be provided") if (self.lokr_w2 is None) and ((self.lokr_w2_a is None) or (self.lokr_w2_b is None)): raise ValueError("Either lokr_w2 or both lokr_w2_a and lokr_w2_b should be provided") state_dict = {} if self.lokr_w1 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1"] = self.lokr_w1 elif self.lokr_w1_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1_a"] = self.lokr_w1_a state_dict[f"base_model.model.{self.peft_key}.lokr_w1_b"] = self.lokr_w1_b if self.lokr_w2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2"] = self.lokr_w2 elif self.lokr_w2_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2_a"] = self.lokr_w2_a state_dict[f"base_model.model.{self.peft_key}.lokr_w2_b"] = self.lokr_w2_b if self.lokr_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_t2"] = self.lokr_t2 return state_dict def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]: """Factorizes the provided number into the product of two numbers Args: dimension (`int`): The number that needs to be factorized. factor (`int`, optional): Factorization divider. The algorithm will try to output two numbers, one of each will be as close to the factor as possible. If -1 is provided, the decomposition algorithm would try to search dividers near the square root of the dimension. Defaults to -1. Returns: Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is always less than or equal to the second. Example: ```py >>> factorization(256, factor=-1) (16, 16) >>> factorization(128, factor=-1) (8, 16) >>> factorization(127, factor=-1) (1, 127) >>> factorization(128, factor=4) (4, 32) ``` """ if factor > 0 and (dimension % factor) == 0: m = factor n = dimension // factor return m, n if factor == -1: factor = dimension m, n = 1, dimension length = m + n while m < n: new_m = m + 1 while dimension % new_m != 0: new_m += 1 new_n = dimension // new_m if new_m + new_n > length or new_m > factor: break else: m, n = new_m, new_n if m > n: n, m = m, n return m, n The provided code snippet includes necessary dependencies for implementing the `construct_peft_lokrconfig` function. 
Write a Python function `def construct_peft_lokrconfig(info: Dict[str, LoKrInfo], decompose_factor: int = -1, **kwargs) -> LoKrConfig` to solve the following problem: Constructs LoKrConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint Returns: LoKrConfig: config for constructing LoKr Here is the function: def construct_peft_lokrconfig(info: Dict[str, LoKrInfo], decompose_factor: int = -1, **kwargs) -> LoKrConfig: """Constructs LoKrConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint Returns: LoKrConfig: config for constructing LoKr """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any((val.lokr_t2 is not None) for val in info.values()) # decompose_both should be enabled if any w1 matrix in any layer is decomposed into 2 decompose_both = any((val.lokr_w1_a is not None and val.lokr_w1_b is not None) for val in info.values()) # Determining decompose factor is a bit tricky (but it is most often -1) # Check that decompose_factor is equal to provided for val in info.values(): # Determine shape of first matrix if val.lokr_w1 is not None: w1_shape = tuple(val.lokr_w1.shape) else: w1_shape = (val.lokr_w1_a.shape[0], val.lokr_w1_b.shape[1]) # Determine shape of second matrix if val.lokr_w2 is not None: w2_shape = tuple(val.lokr_w2.shape[:2]) elif val.lokr_t2 is not None: w2_shape = (val.lokr_w2_a.shape[1], val.lokr_w2_b.shape[1]) else: # We may iterate over Conv2d layer, for which second item in shape is multiplied by ksize^2 w2_shape = (val.lokr_w2_a.shape[0], val.lokr_w2_b.shape[1]) # We need to check, whether decompose_factor is really -1 or not shape = (w1_shape[0], w2_shape[0]) if factorization(shape[0] * shape[1], factor=-1) != shape: raise ValueError("Cannot infer decompose_factor, probably it is not equal to -1") config = LoKrConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, decompose_both=decompose_both, decompose_factor=decompose_factor, ) return config
Constructs LoKrConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint Returns: LoKrConfig: config for constructing LoKr
161,446
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization class LoRAInfo: def peft_state_dict(self) -> Dict[str, torch.Tensor]: class LoHaInfo: def peft_state_dict(self) -> Dict[str, torch.Tensor]: def combine_peft_state_dict(info: Dict[str, Union[LoRAInfo, LoHaInfo]]) -> Dict[str, torch.Tensor]: result = {} for key_info in info.values(): result.update(key_info.peft_state_dict()) return result
null
161,447
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization def detect_adapter_type(keys: List[str]) -> PeftType: # Detect type of adapter by keys # Inspired by this: # https://github.com/bmaltais/kohya_ss/blob/ed4e3b0239a40506de9a17e550e6cf2d0b867a4f/tools/lycoris_utils.py#L312 for key in keys: if "alpha" in key: continue elif any(x in key for x in ["lora_down", "lora_up"]): # LoRA return PeftType.LORA elif any(x in key for x in ["hada_w1", "hada_w2", "hada_t1", "hada_t2"]): # LoHa may have the following keys: # hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b, hada_t1, hada_t2 return PeftType.LOHA elif any(x in key for x in ["lokr_w1", "lokr_w2", "lokr_t1", "lokr_t2"]): # LoKr may have the following keys: # lokr_w1, lokr_w2, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t1, lokr_t2 return PeftType.LOKR elif "diff" in key: raise ValueError("Currently full diff adapters are not implemented") else: raise ValueError("Unknown adapter type, probably not implemented")
null
161,448
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.")
null
161,449
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model UNET_TARGET_MODULES = [ "to_q", "to_k", "to_v", "proj", "proj_in", "proj_out", "conv", "conv1", "conv2", "conv_shortcut", "to_out.0", "time_emb_proj", "ff.net.2", ] def create_unet_adapter_config(args: argparse.Namespace) -> Union[LoraConfig, LoHaConfig, LoKrConfig]: if args.adapter == "full": raise ValueError("Cannot create unet adapter config for full parameter") if args.adapter == "lora": config = LoraConfig( r=args.unet_r, lora_alpha=args.unet_alpha, target_modules=UNET_TARGET_MODULES, lora_dropout=args.unet_dropout, bias=args.unet_bias, init_lora_weights=True, ) elif args.adapter == "loha": config = LoHaConfig( r=args.unet_r, alpha=args.unet_alpha, target_modules=UNET_TARGET_MODULES, rank_dropout=args.unet_rank_dropout, module_dropout=args.unet_module_dropout, use_effective_conv2d=args.unet_use_effective_conv2d, init_weights=True, ) elif args.adapter == "lokr": config = LoKrConfig( r=args.unet_r, alpha=args.unet_alpha, target_modules=UNET_TARGET_MODULES, rank_dropout=args.unet_rank_dropout, module_dropout=args.unet_module_dropout, use_effective_conv2d=args.unet_use_effective_conv2d, decompose_both=args.unet_decompose_both, decompose_factor=args.unet_decompose_factor, init_weights=True, ) else: raise ValueError(f"Unknown adapter type {args.adapter}") return config
null
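A minimal sketch of calling create_unet_adapter_config above; the Namespace fields are hypothetical stand-ins for what parse_args would produce for the lora subcommand:

import argparse

args = argparse.Namespace(adapter="lora", unet_r=8, unet_alpha=8, unet_dropout=0.0, unet_bias="none")
unet_config = create_unet_adapter_config(args)
# unet = get_peft_model(unet, unet_config)  # would wrap the diffusers UNet2DConditionModel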
161,450
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model TEXT_ENCODER_TARGET_MODULES = ["fc1", "fc2", "q_proj", "k_proj", "v_proj", "out_proj"] def create_text_encoder_adapter_config(args: argparse.Namespace) -> Union[LoraConfig, LoHaConfig, LoKrConfig]: if args.adapter == "full": raise ValueError("Cannot create text_encoder adapter config for full parameter") if args.adapter == "lora": config = LoraConfig( r=args.te_r, lora_alpha=args.te_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, lora_dropout=args.te_dropout, bias=args.te_bias, init_lora_weights=True, ) elif args.adapter == "loha": config = LoHaConfig( r=args.te_r, alpha=args.te_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, rank_dropout=args.te_rank_dropout, module_dropout=args.te_module_dropout, init_weights=True, ) elif args.adapter == "lokr": config = LoKrConfig( r=args.te_r, alpha=args.te_alpha, target_modules=TEXT_ENCODER_TARGET_MODULES, rank_dropout=args.te_rank_dropout, module_dropout=args.te_module_dropout, decompose_both=args.te_decompose_both, decompose_factor=args.te_decompose_factor, init_weights=True, ) else: raise ValueError(f"Unknown adapter type {args.adapter}") return config
null
161,451
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." 
) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) # Adapter arguments subparsers = parser.add_subparsers(dest="adapter") # Dummy subparser to train whole model subparsers.add_parser("full", help="Train full model without adapters") # LoRA adapter lora = subparsers.add_parser("lora", help="Use LoRA adapter") lora.add_argument("--unet_r", type=int, default=8, help="LoRA rank for unet") lora.add_argument("--unet_alpha", type=int, default=8, help="LoRA alpha for unet") lora.add_argument("--unet_dropout", type=float, default=0.0, help="LoRA dropout probability for unet") lora.add_argument( "--unet_bias", type=str, default="none", help="Bias type for LoRA. 
Can be 'none', 'all' or 'lora_only'", ) lora.add_argument( "--te_r", type=int, default=8, help="LoRA rank for text_encoder, only used if `train_text_encoder` is True" ) lora.add_argument( "--te_alpha", type=int, default=8, help="LoRA alpha for text_encoder, only used if `train_text_encoder` is True", ) lora.add_argument( "--te_dropout", type=float, default=0.0, help="LoRA dropout probability for text_encoder, only used if `train_text_encoder` is True", ) lora.add_argument( "--te_bias", type=str, default="none", help="Bias type for LoRA. Can be 'none', 'all' or 'lora_only', only used if `train_text_encoder` is True", ) # LoHa adapter loha = subparsers.add_parser("loha", help="Use LoHa adapter") loha.add_argument("--unet_r", type=int, default=8, help="LoHa rank for unet") loha.add_argument("--unet_alpha", type=int, default=8, help="LoHa alpha for unet") loha.add_argument("--unet_rank_dropout", type=float, default=0.0, help="LoHa rank_dropout probability for unet") loha.add_argument( "--unet_module_dropout", type=float, default=0.0, help="LoHa module_dropout probability for unet" ) loha.add_argument( "--unet_use_effective_conv2d", action="store_true", help="Use parameter effective decomposition in unet for Conv2d 3x3 with ksize > 1", ) loha.add_argument( "--te_r", type=int, default=8, help="LoHa rank for text_encoder, only used if `train_text_encoder` is True" ) loha.add_argument( "--te_alpha", type=int, default=8, help="LoHa alpha for text_encoder, only used if `train_text_encoder` is True", ) loha.add_argument( "--te_rank_dropout", type=float, default=0.0, help="LoHa rank_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) loha.add_argument( "--te_module_dropout", type=float, default=0.0, help="LoHa module_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) # LoKr adapter lokr = subparsers.add_parser("lokr", help="Use LoKr adapter") lokr.add_argument("--unet_r", type=int, default=8, help="LoKr rank for unet") lokr.add_argument("--unet_alpha", type=int, default=8, help="LoKr alpha for unet") lokr.add_argument("--unet_rank_dropout", type=float, default=0.0, help="LoKr rank_dropout probability for unet") lokr.add_argument( "--unet_module_dropout", type=float, default=0.0, help="LoKr module_dropout probability for unet" ) lokr.add_argument( "--unet_use_effective_conv2d", action="store_true", help="Use parameter effective decomposition in unet for Conv2d 3x3 with ksize > 1", ) lokr.add_argument( "--unet_decompose_both", action="store_true", help="Decompose left matrix in kronecker product for unet" ) lokr.add_argument( "--unet_decompose_factor", type=int, default=-1, help="Decompose factor in kronecker product for unet" ) lokr.add_argument( "--te_r", type=int, default=8, help="LoKr rank for text_encoder, only used if `train_text_encoder` is True" ) lokr.add_argument( "--te_alpha", type=int, default=8, help="LoKr alpha for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_rank_dropout", type=float, default=0.0, help="LoKr rank_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_module_dropout", type=float, default=0.0, help="LoKr module_dropout probability for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_decompose_both", action="store_true", help="Decompose left matrix in kronecker product for text_encoder, only used if `train_text_encoder` is True", ) lokr.add_argument( "--te_decompose_factor", 
type=int, default=-1, help="Decompose factor in kronecker product for text_encoder, only used if `train_text_encoder` is True", ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args
null
161,452
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model def b2mb(x): return int(x / 2**20)
null
161,453
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model def collate_fn(examples, with_prior_preservation=False): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch
null
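A sketch of wiring collate_fn above into a DataLoader; train_dataset is an assumed DreamBoothDataset yielding dicts with instance_prompt_ids and instance_images:

from torch.utils.data import DataLoader

train_dataloader = DataLoader(
    train_dataset,
    batch_size=4,
    shuffle=True,
    collate_fn=lambda examples: collate_fn(examples, with_prior_preservation=False),
)
batch = next(iter(train_dataloader))
print(batch["pixel_values"].shape, batch["input_ids"].shape)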
161,454
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from pathlib import Path from typing import Optional, Union import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoHaConfig, LoKrConfig, LoraConfig, get_peft_model def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}"
null
161,455
import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import Repository, create_repo from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from transformers.utils import get_full_repo_name from peft import LoraConfig, TaskType, get_peft_model def parse_args(): parser = argparse.ArgumentParser(description="Training a PEFT model for Semantic Search task") parser.add_argument("--dataset_name", type=str, default=None, help="dataset name on HF hub") parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--sanity_test", action="store_true", help="Whether to enable sanity test.", ) parser.add_argument( "--use_peft", action="store_true", help="Whether to use PEFT.", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
161,456
import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import Repository, create_repo from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from transformers.utils import get_full_repo_name from peft import LoraConfig, TaskType, get_peft_model def save_model_hook(models, weights, output_dir): for i, model in enumerate(models): model.save_pretrained(output_dir, state_dict=weights[i]) # make sure to pop weight so that corresponding model is not saved again weights.pop()
null
161,457
import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import Repository, create_repo from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from transformers.utils import get_full_repo_name from peft import LoraConfig, TaskType, get_peft_model def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"): model.load_adapter(input_dir, model.active_adapter, is_trainable=True)
null
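The save/load hooks in this record and the previous one are meant to be registered on the Accelerator so that checkpoints contain only the adapter weights; a hedged sketch, assuming an accelerate version that exposes these hooks:

from accelerate import Accelerator

accelerator = Accelerator()
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
# later: accelerator.save_state(checkpoint_dir) and accelerator.load_state(checkpoint_dir)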
161,458
import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import Repository, create_repo from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from transformers.utils import get_full_repo_name from peft import LoraConfig, TaskType, get_peft_model def get_cosing_embeddings(query_embs, product_embs): return torch.sum(query_embs * product_embs, axis=1)
null
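Despite its name, the helper above is a plain row-wise dot product, so it matches cosine similarity only when both embedding batches are already L2-normalized; a small check:

import torch
import torch.nn.functional as F

query_embs = F.normalize(torch.randn(4, 768), dim=1)
product_embs = F.normalize(torch.randn(4, 768), dim=1)
scores = get_cosing_embeddings(query_embs, product_embs)  # shape (4,), each value in [-1, 1]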
161,459
import argparse import logging import math import os import random from pathlib import Path import datasets import evaluate import torch import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import DatasetDict, load_dataset from huggingface_hub import Repository, create_repo from torch import nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler from transformers.utils import get_full_repo_name from peft import LoraConfig, TaskType, get_peft_model def get_loss(cosine_score, labels): return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0)))
null
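A worked check of get_loss above: positive pairs (label 1) are pulled toward cosine 1, while negative pairs (label 0) are penalized only when their cosine is positive.

import torch

cosine_score = torch.tensor([0.9, 0.2, -0.3])
labels = torch.tensor([1.0, 0.0, 0.0])
# per-pair terms: 1 - 0.9 = 0.1, clamp(0.2) = 0.2, clamp(-0.3) = 0.0
loss = get_loss(cosine_score, labels)  # mean of squares = (0.01 + 0.04 + 0.0) / 3 ≈ 0.0167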
161,460
import gc import os import sys import threading import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup, set_seed, ) from peft import LoraConfig, TaskType, get_peft_model def levenshtein_distance(str1, str2): def get_closest_label(eval_pred, classes): min_id = sys.maxsize min_edit_distance = sys.maxsize for i, class_label in enumerate(classes): edit_distance = levenshtein_distance(eval_pred.strip(), class_label) if edit_distance < min_edit_distance: min_id = i min_edit_distance = edit_distance return classes[min_id]
null
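The levenshtein_distance stub in this record has an empty body; the full single-row dynamic-programming implementation appears later in record 161,472 and is reproduced here so get_closest_label can be read as runnable:

def levenshtein_distance(str1, str2):
    # Single-row DP edit distance: O(len(str1) * len(str2)) time, O(len(str2)) extra space.
    if str1 == str2:
        return 0
    num_rows = len(str1) + 1
    num_cols = len(str2) + 1
    dp_matrix = list(range(num_cols))
    for i in range(1, num_rows):
        prev = dp_matrix[0]
        dp_matrix[0] = i
        for j in range(1, num_cols):
            temp = dp_matrix[j]
            if str1[i - 1] == str2[j - 1]:
                dp_matrix[j] = prev
            else:
                dp_matrix[j] = min(prev, dp_matrix[j], dp_matrix[j - 1]) + 1
            prev = temp
    return dp_matrix[num_cols - 1]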
161,461
import gc import os import sys import threading import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup, set_seed, ) from peft import LoraConfig, TaskType, get_peft_model def b2mb(x): return int(x / 2**20)
null
161,462
import os import torch import torch.nn as nn import transformers from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from peft import LoraConfig, get_peft_model print(model) for param in model.parameters(): param.requires_grad = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability param.data = param.data.to(torch.float32) for _, p in model.named_parameters(): dtype = p.dtype if dtype not in dtypes: dtypes[dtype] = 0 dtypes[dtype] += p.numel() print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True)) The provided code snippet includes necessary dependencies for implementing the `print_trainable_parameters` function. Write a Python function `def print_trainable_parameters(model)` to solve the following problem: Prints the number of trainable parameters in the model. Here is the function: def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" )
Prints the number of trainable parameters in the model.
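A usage sketch for print_trainable_parameters above; the base model and target modules are placeholders, and recent peft versions expose an equivalent model.print_trainable_parameters() method:

from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model = get_peft_model(model, LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"]))
print_trainable_parameters(model)  # prints trainable params, total params, and the trainable percentage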
161,463
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def parse_args(): parser = argparse.ArgumentParser(description="Whisper Fine-Tuning with AdaLora") parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument("--language", type=str, help="Language to use for training; e.g., 'Hindi' ", required=True) parser.add_argument("--language_abbr", type=str, help="Language to use for training; e.g., 'hi' ", required=True) parser.add_argument( "--task", type=str, default="transcribe", help="Task to use for training; e.g., 'transcribe' ", required=False ) parser.add_argument( "--dataset_name", type=str, default="mozilla-foundation/common_voice_11_0", help="Dataset to use for training; e.g., 'whisper' ", required=False, ) parser.add_argument( "--dataset_in_streaming_mode", action="store_true", help="Whether to use streaming mode for the dataset.", ) parser.add_argument( "--do_lower_case", action="store_true", help="lowercase the transcribed text before tokenizing" ) parser.add_argument( "--do_remove_punctuation", action="store_true", help="remove punctuation from the transcribed text" ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--max_audio_input_length", type=float, default=30.0, help="Maximum audio length in seconds.") parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--buffer_size", type=int, default=5000, help="Number of samples to prefetch in the streaming mode.", ) parser.add_argument( "--dataloader_pin_memory", action="store_true", help="Whether or not to pin memory for the DataLoader.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help="Number of subprocesses to use for data loading.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, 
default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--load_best_model", action="store_true", help="Whether to load the best model at the end of training", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--logging_steps", type=int, default=100, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--evaluation_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) # lora/adalora specific args parser.add_argument( "--use_peft", action="store_true", help="Whether to use PEFT", ) parser.add_argument( "--use_adalora", action="store_true", help="Whether to use AdaLoRA or LoRA. 
If set, uses AdaLoRA instead of the default LoRA.", ) parser.add_argument( "--init_r", type=int, default=12, help="Initial AdaLoRA rank", ) parser.add_argument( "--target_r", type=int, default=4, help="Target AdaLoRA rank", ) parser.add_argument( "--tinit", type=int, default=200, help="number of warmup steps for AdaLoRA wherein no pruning is performed", ) parser.add_argument( "--tfinal", type=int, default=1000, help=" fix the resulting budget distribution and fine-tune the model for tfinal steps when using AdaLoRA ", ) parser.add_argument( "--delta_t", type=int, default=10, help="interval of steps for AdaLoRA to update rank", ) parser.add_argument( "--lora_alpha", type=int, default=32, help="LORA alpha", ) parser.add_argument( "--r", type=int, default=8, help="LORA rank", ) parser.add_argument( "--lora_dropout", type=float, default=0.1, help="LORA dropout", ) parser.add_argument( "--orth_reg_weight", type=float, default=0.5, help="Orthogonal regularization weight", ) parser.add_argument( "--debug_mode", action="store_true", help="Whether to use debug mode", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
161,464
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def load_streaming_dataset(dataset_name, dataset_config_name, split, **kwargs): if "+" in split: # load multiple splits separated by the `+` symbol *with* streaming mode dataset_splits = [ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=True, **kwargs) for split_name in split.split("+") ] # interleave multiple splits to form one dataset interleaved_dataset = interleave_datasets(dataset_splits) return interleaved_dataset else: # load a single split *with* streaming mode dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=True, **kwargs) return dataset
null
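An example call for load_streaming_dataset above, combining two Common Voice splits into one streaming dataset; the use_auth_token keyword is an assumption (newer datasets versions spell it token):

raw_train = load_streaming_dataset(
    "mozilla-foundation/common_voice_11_0",
    "hi",
    split="train+validation",
    use_auth_token=True,
)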
161,465
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def prepare_dataset_wrapper(do_lower_case, do_remove_punctuation, processor, normalizer): def prepare_dataset(batch): # load and (possibly) resample audio data to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = processor.feature_extractor( audio["array"], sampling_rate=audio["sampling_rate"] ).input_features[0] # compute input length of audio sample in seconds batch["input_length"] = len(audio["array"]) / audio["sampling_rate"] # optional pre-processing steps transcription = batch["sentence"] if do_lower_case: transcription = transcription.lower() if do_remove_punctuation: transcription = normalizer(transcription).strip() # encode target text to label ids batch["labels"] = processor.tokenizer(transcription).input_ids return batch return prepare_dataset
null
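The wrapper above returns a closure intended for datasets' map; a hedged sketch, with the processor checkpoint and column handling as placeholders:

from transformers import WhisperProcessor
from transformers.models.whisper.english_normalizer import BasicTextNormalizer

processor = WhisperProcessor.from_pretrained("openai/whisper-large-v2", language="Hindi", task="transcribe")
prepare_dataset = prepare_dataset_wrapper(
    do_lower_case=False, do_remove_punctuation=False, processor=processor, normalizer=BasicTextNormalizer()
)
vectorized_train = raw_train.map(prepare_dataset).with_format("torch")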
161,466
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def save_model_hook(models, weights, output_dir): for model in models: model.save_pretrained(output_dir) # make sure to pop weight so that corresponding model is not saved again weights.pop()
null
161,467
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again PeftModel.from_pretrained(model.base_model.model, input_dir)
null
161,468
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def get_audio_length_processor(max_input_length): def is_audio_in_length_range(length): return length < max_input_length return is_audio_in_length_range
null
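A sketch of applying the length filter above after feature extraction; it assumes a mapped dataset that exposes the input_length column:

is_audio_in_length_range = get_audio_length_processor(30.0)  # seconds, mirrors --max_audio_input_length
filtered_train = vectorized_train.filter(is_audio_in_length_range, input_columns=["input_length"])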
161,469
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union import datasets import evaluate import numpy as np import torch import transformers import wandb from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( BitsAndBytesConfig, SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model def evaluation_loop(model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator): model.eval() predictions = [] references = [] normalized_predictions = [] normalized_references = [] for _, batch in enumerate(tqdm(eval_dataloader)): with torch.cuda.amp.autocast(): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"], forced_decoder_ids=forced_decoder_ids, max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id) decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True) predictions.extend(decoded_preds) references.extend(decoded_labels) normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds]) normalized_references.extend([normalizer(label).strip() for label in decoded_labels]) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute(predictions=predictions, references=references) normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references) eval_metrics = {"eval/wer": wer, "eval/normalized_wer": normalized_wer} if accelerator.get_tracker("wandb"): sample_size = min(len(predictions), 256) ids = [randint(0, len(predictions) - 1) for p in range(0, sample_size)] sample_predictions = [predictions[i] for i in ids] sample_references = [references[i] for i in ids] sample_normalized_predictions = [normalized_predictions[i] for i in ids] sample_normalized_references = [normalized_references[i] for i in ids] table_rows = [ list(r) for r in zip( sample_predictions, sample_references, sample_normalized_predictions, sample_normalized_references ) ] eval_metrics["eval_samples"] = wandb.Table( columns=["predictions", "references", "normalized_predictions", "normalized_references"], rows=table_rows, ) return eval_metrics
null
161,470
import torch from datasets import load_dataset from torch.utils.data import DataLoader, Dataset from transformers import AutoModelForVision2Seq, AutoProcessor, BitsAndBytesConfig from peft import LoraConfig, get_peft_model processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") def collator(batch): # pad the input_ids and attention_mask processed_batch = {} for key in batch[0].keys(): if key != "text": processed_batch[key] = torch.stack([example[key] for example in batch]) else: text_inputs = processor.tokenizer( [example["text"] for example in batch], padding=True, return_tensors="pt" ) processed_batch["input_ids"] = text_inputs["input_ids"] processed_batch["attention_mask"] = text_inputs["attention_mask"] return processed_batch
null
161,471
import os import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup from peft import AdaLoraConfig, PeftConfig, PeftModel, TaskType, get_peft_model text_column = "sentence" label_column = "text_label" max_length = 128 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt") def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs
null
161,472
import gc import os import sys import threading import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import LoraConfig, TaskType, get_peft_model def levenshtein_distance(str1, str2): # TC: O(N^2) # SC: O(N) if str1 == str2: return 0 num_rows = len(str1) + 1 num_cols = len(str2) + 1 dp_matrix = list(range(num_cols)) for i in range(1, num_rows): prev = dp_matrix[0] dp_matrix[0] = i for j in range(1, num_cols): temp = dp_matrix[j] if str1[i - 1] == str2[j - 1]: dp_matrix[j] = prev else: dp_matrix[j] = min(prev, dp_matrix[j], dp_matrix[j - 1]) + 1 prev = temp return dp_matrix[num_cols - 1] def get_closest_label(eval_pred, classes): min_id = sys.maxsize min_edit_distance = sys.maxsize for i, class_label in enumerate(classes): edit_distance = levenshtein_distance(eval_pred.strip(), class_label) if edit_distance < min_edit_distance: min_id = i min_edit_distance = edit_distance return classes[min_id]
null
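Quick sanity checks for the edit-distance helper and get_closest_label above:

assert levenshtein_distance("kitten", "sitting") == 3
assert levenshtein_distance("flaw", "lawn") == 2
print(get_closest_label("complain", ["complaint", "no complaint"]))  # -> "complaint"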
161,473
import gc import os import sys import threading import psutil import torch from accelerate import Accelerator from datasets import load_dataset from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import LoraConfig, TaskType, get_peft_model def b2mb(x): return int(x / 2**20)
null
161,474
import argparse import os from typing import Dict import torch from diffusers import UNet2DConditionModel from safetensors.torch import save_file from transformers import CLIPTextModel from peft import PeftModel, get_peft_model_state_dict LORA_ADAPTER_NAME = "default" def get_module_kohya_state_dict( module: PeftModel, prefix: str, dtype: torch.dtype, adapter_name: str = LORA_ADAPTER_NAME ) -> Dict[str, torch.Tensor]: kohya_ss_state_dict = {} for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items(): kohya_key = peft_key.replace("base_model.model", prefix) kohya_key = kohya_key.replace("lora_A", "lora_down") kohya_key = kohya_key.replace("lora_B", "lora_up") kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2) kohya_ss_state_dict[kohya_key] = weight.to(dtype) # Set alpha parameter if "lora_down" in kohya_key: alpha_key = f'{kohya_key.split(".")[0]}.alpha' kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype) return kohya_ss_state_dict
null
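A hedged sketch of exporting PEFT LoRA weights to a Kohya-style safetensors file with the helper above; peft_unet and peft_text_encoder are assumed PeftModel-wrapped modules, and the lora_unet / lora_te prefixes follow the usual Kohya naming:

import torch
from safetensors.torch import save_file

kohya_ss_state_dict = {}
kohya_ss_state_dict.update(get_module_kohya_state_dict(peft_unet, "lora_unet", torch.float16))
kohya_ss_state_dict.update(get_module_kohya_state_dict(peft_text_encoder, "lora_te", torch.float16))
save_file(kohya_ss_state_dict, "adapter.safetensors")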
161,475
import argparse import os from collections import Counter from dataclasses import dataclass from typing import Dict, Optional import safetensors import torch from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict class LoRAInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lora_A: Optional[torch.Tensor] = None lora_B: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.lora_A is None or self.lora_B is None: raise ValueError("At least one of lora_A or lora_B is None, they must both be provided") return {f"{peft_key}.lora_A.weight": self.lora_A, f"{peft_key}.lora_B.weight": self.lora_A} The provided code snippet includes necessary dependencies for implementing the `construct_peft_loraconfig` function. Write a Python function `def construct_peft_loraconfig(info: Dict[str, LoRAInfo]) -> LoraConfig` to solve the following problem: Constructs LoraConfig from data extracted from kohya checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from kohya checkpoint Returns: LoraConfig: config for constructing LoRA Here is the function: def construct_peft_loraconfig(info: Dict[str, LoRAInfo]) -> LoraConfig: """Constructs LoraConfig from data extracted from kohya checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from kohya checkpoint Returns: LoraConfig: config for constructing LoRA """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = list(info.keys()) # Determine most common rank and alpha r = Counter(ranks.values()).most_common(1)[0] lora_alpha = Counter(alphas.values()).most_common(1)[0] # Determine which modules have different rank and alpha rank_pattern = dict(filter(lambda x: x[1] != r, ranks.items())) alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, alphas.items())) config = LoraConfig( r=r, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=0.0, bias="none", init_lora_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, ) return config
Constructs LoraConfig from data extracted from kohya checkpoint

Args:
    info (Dict[str, LoRAInfo]): Information extracted from kohya checkpoint

Returns:
    LoraConfig: config for constructing LoRA
161,476
import argparse import os from collections import Counter from dataclasses import dataclass from typing import Dict, Optional import safetensors import torch from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict class LoRAInfo: def peft_state_dict(self) -> Dict[str, torch.Tensor]: def combine_peft_state_dict(info: Dict[str, LoRAInfo]) -> Dict[str, torch.Tensor]: result = {} for key_name, key_info in info.items(): result[f"base_model.model.{key_name}.lora_A.weight"] = key_info.lora_A result[f"base_model.model.{key_name}.lora_B.weight"] = key_info.lora_B return result
null
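A hedged end-to-end sketch tying construct_peft_loraconfig and combine_peft_state_dict together; unet_info is a Dict[str, LoRAInfo] assumed to have been filled while parsing the kohya checkpoint keys, and the checkpoint id is a placeholder:

from diffusers import UNet2DConditionModel
from peft import get_peft_model, set_peft_model_state_dict

unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet")
unet_config = construct_peft_loraconfig(unet_info)
unet = get_peft_model(unet, unet_config)
set_peft_model_state_dict(unet, combine_peft_state_dict(unet_info))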
161,477
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.")
null
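A short usage sketch (the model id is a placeholder): the returned class is used to load whichever text encoder matches the checkpoint's config.

pretrained_model_name_or_path = "<diffusion-base-model>"  # placeholder
text_encoder_cls = import_model_class_from_model_name_or_path(pretrained_model_name_or_path, revision=None)
text_encoder = text_encoder_cls.from_pretrained(
    pretrained_model_name_or_path, subfolder="text_encoder", revision=None
)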
161,478
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # lora args parser.add_argument("--use_lora", action="store_true", help="Whether to use Lora for parameter efficient tuning") parser.add_argument("--lora_r", type=int, default=8, help="Lora rank, only used if use_lora is True") parser.add_argument("--lora_alpha", type=int, default=32, help="Lora alpha, only used if use_lora is True") parser.add_argument("--lora_dropout", type=float, default=0.0, help="Lora dropout, only used if use_lora is True") parser.add_argument( "--lora_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora is True", ) parser.add_argument( "--lora_text_encoder_r", type=int, default=8, help="Lora rank for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_alpha", type=int, default=32, help="Lora alpha for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_dropout", type=float, default=0.0, help="Lora dropout for text encoder, only used if `use_lora` and `train_text_encoder` are True", ) parser.add_argument( "--lora_text_encoder_bias", type=str, default="none", help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora and `train_text_encoder` are True", ) parser.add_argument( "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader." ) parser.add_argument( "--no_tracemalloc", default=False, action="store_true", help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' 
), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args
null
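In the full training script these flags typically feed a LoraConfig; a rough sketch under stated assumptions (the CLI values and the target module list are placeholders, the real script defines its own):

from peft import LoraConfig, get_peft_model

args = parse_args([
    "--pretrained_model_name_or_path", "<base-model>",   # placeholder
    "--instance_data_dir", "./instance_images",
    "--instance_prompt", "a photo of sks dog",
    "--use_lora",
])

if args.use_lora:
    unet_config = LoraConfig(
        r=args.lora_r,
        lora_alpha=args.lora_alpha,
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],  # assumed list
        lora_dropout=args.lora_dropout,
        bias=args.lora_bias,
    )
    unet = get_peft_model(unet, unet_config)  # `unet` assumed loaded from the base model earlier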
161,479
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model def b2mb(x): return int(x / 2**20)
null
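`b2mb` simply converts bytes to whole mebibytes; a quick sketch of its intended use for memory logging:

import psutil
import torch

if torch.cuda.is_available():
    print(f"GPU memory allocated: {b2mb(torch.cuda.memory_allocated())} MB")
print(f"CPU RSS: {b2mb(psutil.Process().memory_info().rss)} MB")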
161,480
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model def collate_fn(examples, with_prior_preservation=False): input_ids = [example["instance_prompt_ids"] for example in examples] pixel_values = [example["instance_images"] for example in examples] # Concat class and instance examples for prior preservation. # We do this to avoid doing two forward passes. if with_prior_preservation: input_ids += [example["class_prompt_ids"] for example in examples] pixel_values += [example["class_images"] for example in examples] pixel_values = torch.stack(pixel_values) pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float() input_ids = torch.cat(input_ids, dim=0) batch = { "input_ids": input_ids, "pixel_values": pixel_values, } return batch
null
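A hedged sketch of wiring `collate_fn` into a DataLoader; `train_dataset` is assumed to yield dicts with `instance_prompt_ids` / `instance_images` (plus the class-image keys when prior preservation is on), as the DreamBooth dataset in this script does:

from functools import partial
from torch.utils.data import DataLoader

train_dataloader = DataLoader(
    train_dataset,  # assumed DreamBooth-style dataset
    batch_size=4,
    shuffle=True,
    collate_fn=partial(collate_fn, with_prior_preservation=False),
    num_workers=1,
)

batch = next(iter(train_dataloader))
pixel_values = batch["pixel_values"]  # (B, C, H, W) float tensor
input_ids = batch["input_ids"]        # concatenated tokenized prompts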
161,481
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import LoraConfig, get_peft_model def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}"
null
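Usage is straightforward; a small sketch (the repo names are placeholders, and the token falls back to the locally cached Hugging Face login):

repo_name = get_full_repo_name("dreambooth-lora-output")                          # -> "<username>/dreambooth-lora-output"
org_repo = get_full_repo_name("dreambooth-lora-output", organization="my-org")    # -> "my-org/dreambooth-lora-output"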
161,482
import argparse import evaluate import torch from accelerate import Accelerator, DistributedDataParallelKwargs from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from peft import ( PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, get_peft_model, ) from peft.utils.other import fsdp_auto_wrap_policy def parse_args(): parser = argparse.ArgumentParser(description="PEFT a transformers model on a sequence classification task") parser.add_argument( "--num_virtual_tokens", type=int, default=20, help="num_virtual_tokens if the number of virtual tokens used in prompt/prefix/P tuning.", ) parser.add_argument( "--encoder_hidden_size", type=int, default=128, help="encoder_hidden_size if the encoder hidden size used in P tuninig/Prefix tuning.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=1e-3, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--peft_type", type=str, default="p_tuning", help="The PEFT type to use.", choices=["p_tuning", "prefix_tuning", "prompt_tuning"], ) args = parser.parse_args() assert args.output_dir is not None, "Need an `output_dir` to store the finetune model and verify." return args
null
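The surrounding training script usually maps these arguments onto one of the imported PEFT configs; a rough sketch (the task type and the exact mapping are assumptions, not taken from the entry):

from transformers import AutoModelForSequenceClassification
from peft import PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, get_peft_model

args = parse_args()

if args.peft_type == "p_tuning":
    peft_config = PromptEncoderConfig(
        task_type="SEQ_CLS",
        num_virtual_tokens=args.num_virtual_tokens,
        encoder_hidden_size=args.encoder_hidden_size,
    )
elif args.peft_type == "prefix_tuning":
    peft_config = PrefixTuningConfig(
        task_type="SEQ_CLS",
        num_virtual_tokens=args.num_virtual_tokens,
        encoder_hidden_size=args.encoder_hidden_size,
    )
else:
    peft_config = PromptTuningConfig(task_type="SEQ_CLS", num_virtual_tokens=args.num_virtual_tokens)

model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()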
161,483
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import get_peft_model from peft.tuners.oft.config import OFTConfig def import_model_class_from_model_name_or_path(pretrained_model_name_or_path: str, revision: str): text_encoder_config = PretrainedConfig.from_pretrained( pretrained_model_name_or_path, subfolder="text_encoder", revision=revision, ) model_class = text_encoder_config.architectures[0] if model_class == "CLIPTextModel": from transformers import CLIPTextModel return CLIPTextModel elif model_class == "RobertaSeriesModelWithTransformation": from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation return RobertaSeriesModelWithTransformation else: raise ValueError(f"{model_class} is not supported.")
null
161,484
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import get_peft_model from peft.tuners.oft.config import OFTConfig def parse_args(input_args=None): parser = argparse.ArgumentParser(description="Simple example of a training script.") parser.add_argument( "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", ) parser.add_argument( "--revision", type=str, default=None, required=False, help="Revision of pretrained model identifier from huggingface.co/models.", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--instance_data_dir", type=str, default=None, required=True, help="A folder containing the training data of instance images.", ) parser.add_argument( "--class_data_dir", type=str, default=None, required=False, help="A folder containing the training data of class images.", ) parser.add_argument( "--instance_prompt", type=str, default=None, required=True, help="The prompt with identifier specifying the instance", ) parser.add_argument( "--class_prompt", type=str, default=None, help="The prompt to specify images in the same class as provided instance images.", ) parser.add_argument( "--with_prior_preservation", default=False, action="store_true", help="Flag to add prior preservation loss.", ) parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.") parser.add_argument( "--num_class_images", type=int, default=100, help=( "Minimal class images for prior preservation loss. If there are not enough images already present in" " class_data_dir, additional images will be sampled with class_prompt." ), ) parser.add_argument( "--validation_prompt", type=str, default=None, help="A prompt that is used during validation to verify that the model is learning.", ) parser.add_argument( "--num_validation_images", type=int, default=4, help="Number of images that should be generated during validation with `validation_prompt`.", ) parser.add_argument( "--validation_steps", type=int, default=100, help=( "Run dreambooth validation every X steps. Dreambooth validation consists of running the prompt" " `args.validation_prompt` multiple times: `args.num_validation_images`." 
), ) parser.add_argument( "--output_dir", type=str, default="text-inversion-model", help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--resolution", type=int, default=512, help=( "The resolution for input images, all the images in the train/validation dataset will be resized to this" " resolution" ), ) parser.add_argument( "--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution" ) parser.add_argument("--train_text_encoder", action="store_true", help="Whether to train the text encoder") # oft args parser.add_argument("--use_oft", action="store_true", help="Whether to use OFT for parameter efficient tuning") parser.add_argument("--oft_r", type=int, default=8, help="OFT rank, only used if use_oft is True") parser.add_argument("--oft_alpha", type=int, default=32, help="OFT alpha, only used if use_oft is True") parser.add_argument("--oft_dropout", type=float, default=0.0, help="OFT dropout, only used if use_oft is True") parser.add_argument( "--oft_use_coft", action="store_true", help="Using constrained OFT, only used if use_oft is True" ) parser.add_argument( "--oft_eps", type=float, default=0.0, help="The control strength of COFT. Only has an effect if `oft_use_coft` is set to True.", ) parser.add_argument( "--oft_text_encoder_r", type=int, default=8, help="OFT rank for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_alpha", type=int, default=32, help="OFT alpha for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_dropout", type=float, default=0.0, help="OFT dropout for text encoder, only used if `use_oft` and `train_text_encoder` are True", ) parser.add_argument( "--oft_text_encoder_use_coft", action="store_true", help="Using constrained OFT on the text encoder, only used if use_oft is True", ) parser.add_argument( "--oft_text_encoder_eps", type=float, default=0.0, help="The control strength of COFT on the text encoder. Only has an effect if `oft_text_encoder_use_coft` is set to True.", ) parser.add_argument( "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader." ) parser.add_argument( "--no_tracemalloc", default=False, action="store_true", help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.", ) parser.add_argument( "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader." ) parser.add_argument( "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images." ) parser.add_argument("--num_train_epochs", type=int, default=1) parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help=( "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final" " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming" " training using `--resume_from_checkpoint`." ), ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help=( "Whether training should be resumed from a previous checkpoint. 
Use a path saved by" ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.' ), ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--learning_rate", type=float, default=5e-6, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--scale_lr", action="store_true", default=False, help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.", ) parser.add_argument( "--lr_scheduler", type=str, default="constant", help=( 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",' ' "constant", "constant_with_warmup"]' ), ) parser.add_argument( "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument( "--lr_num_cycles", type=int, default=1, help="Number of hard resets of the lr in cosine_with_restarts scheduler.", ) parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.") parser.add_argument( "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes." ) parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.") parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.") parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.") parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, default=None, help="The name of the repository to keep in sync with the local `output_dir`.", ) parser.add_argument( "--logging_dir", type=str, default="logs", help=( "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to" " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***." ), ) parser.add_argument( "--allow_tf32", action="store_true", help=( "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see" " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices" ), ) parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' 
), ) parser.add_argument( "--wandb_key", type=str, default=None, help=("If report to option is set to wandb, api-key for wandb used for login to wandb "), ) parser.add_argument( "--wandb_project_name", type=str, default=None, help=("If report to option is set to wandb, project name in wandb for log tracking "), ) parser.add_argument( "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16"], help=( "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the" " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config." ), ) parser.add_argument( "--prior_generation_precision", type=str, default=None, choices=["no", "fp32", "fp16", "bf16"], help=( "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >=" " 1.10.and an Nvidia Ampere GPU. Default to fp16 if a GPU is available else fp32." ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument( "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers." ) if input_args is not None: args = parser.parse_args(input_args) else: args = parser.parse_args() env_local_rank = int(os.environ.get("LOCAL_RANK", -1)) if env_local_rank != -1 and env_local_rank != args.local_rank: args.local_rank = env_local_rank if args.with_prior_preservation: if args.class_data_dir is None: raise ValueError("You must specify a data directory for class images.") if args.class_prompt is None: raise ValueError("You must specify prompt for class images.") else: # logger is not available yet if args.class_data_dir is not None: warnings.warn("You need not use --class_data_dir without --with_prior_preservation.") if args.class_prompt is not None: warnings.warn("You need not use --class_prompt without --with_prior_preservation.") return args
null
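Analogously to the LoRA version above, these flags are typically consumed by an OFTConfig; a hedged sketch (the target module list is an assumption, and the exact OFTConfig fields can differ between PEFT versions):

from peft import get_peft_model
from peft.tuners.oft.config import OFTConfig

if args.use_oft:  # `args` assumed to come from parse_args()
    unet_config = OFTConfig(
        r=args.oft_r,
        target_modules=["to_q", "to_k", "to_v", "to_out.0"],  # assumed list
        module_dropout=args.oft_dropout,
        coft=args.oft_use_coft,
        eps=args.oft_eps,
        init_weights=True,
    )
    unet = get_peft_model(unet, unet_config)  # `unet` assumed loaded from the base model earlier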
161,485
import argparse import gc import hashlib import itertools import logging import math import os import threading import warnings from contextlib import nullcontext from pathlib import Path from typing import Optional import datasets import diffusers import numpy as np import psutil import torch import torch.nn.functional as F import torch.utils.checkpoint import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import ( AutoencoderKL, DDPMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, UNet2DConditionModel, ) from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from huggingface_hub import HfFolder, Repository, whoami from PIL import Image from torch.utils.data import Dataset from torchvision import transforms from tqdm.auto import tqdm from transformers import AutoTokenizer, PretrainedConfig from peft import get_peft_model from peft.tuners.oft.config import OFTConfig def b2mb(x): return int(x / 2**20)
null