# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Optional

import torch

from peft.tuners.lora.layer import LoraLayer
from peft.tuners.tuners_utils import BaseTunerLayer
from peft.utils import get_auto_gptq_quant_linear


class QuantLinear(torch.nn.Module, LoraLayer):
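    """
    LoRA layer that wraps an AutoGPTQ quantized linear module. The quantized base weights stay frozen; only the LoRA
    matrices are trained, and because the base weights are packed, the adapter is never merged into them.
    """
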
    def __init__(
        self,
        base_layer,
        adapter_name: str,
        r: int = 0,
        lora_alpha: int = 1,
        lora_dropout: float = 0.0,
        init_lora_weights: bool = True,
        use_rslora: bool = False,
        use_dora: bool = False,
        **kwargs,
    ):
        super().__init__()
        LoraLayer.__init__(self, base_layer)

        if use_dora:
            raise ValueError(f"{self.__class__.__name__} does not support DoRA yet, please set it to False")

        # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
        # for backwards compatibility
        self.quant_linear_module = base_layer
        self._active_adapter = adapter_name
        self.update_layer(
            adapter_name,
            r,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            init_lora_weights=init_lora_weights,
            use_rslora=use_rslora,
            use_dora=use_dora,
        )

    def forward(self, x: torch.Tensor):
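        # The output is the frozen quantized projection plus, for each active adapter, the scaled LoRA delta
        # lora_B(lora_A(dropout(x))) * scaling.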
        # note: logic differs from default Linear because merging is not supported
        result = self.quant_linear_module(x)

        if self.disable_adapters:
            return result

        for active_adapter in self.active_adapters:
            if active_adapter not in self.lora_A.keys():
                continue
            lora_A = self.lora_A[active_adapter]
            lora_B = self.lora_B[active_adapter]
            dropout = self.lora_dropout[active_adapter]
            scaling = self.scaling[active_adapter]
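
            # Outside of autocast, the LoRA weights may live in a different dtype than the quantized base output,
            # so cast the input to the LoRA dtype and cast the delta back before accumulating.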
            requires_conversion = not torch.is_autocast_enabled()
            if requires_conversion:
                expected_dtype = result.dtype
                x = x.to(lora_A.weight.dtype)

            output = lora_B(lora_A(dropout(x)))
            if requires_conversion:
                output = output.to(expected_dtype)
            output = output * scaling
            result += output
        return result
    def __repr__(self) -> str:
        rep = super().__repr__()
        return "lora." + rep

    # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102
    # def reset_lora_parameters(self, adapter_name):
    #     if adapter_name in self.lora_A.keys():
    #         torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight)
    #         torch.nn.init.zeros_(self.lora_B[adapter_name].weight)


def dispatch_gptq(
    target: torch.nn.Module,
    adapter_name: str,
    **kwargs: Any,
) -> Optional[torch.nn.Module]:
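    """
    Return a LoRA ``QuantLinear`` wrapper when ``target`` (or its base layer) is an AutoGPTQ quantized linear module;
    otherwise return None so that other dispatchers can handle the target.
    """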
    new_module = None

    if isinstance(target, BaseTunerLayer):
        target_base_layer = target.get_base_layer()
    else:
        target_base_layer = target

    gptq_quantization_config = kwargs.get("gptq_quantization_config", None)
    AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config)

    if AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear):
        new_module = QuantLinear(target, adapter_name, **kwargs)
        target.qweight = target_base_layer.qweight

    return new_module
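

# Illustrative usage sketch (not part of this module): roughly how the dispatcher is invoked when a GPTQ-quantized
# module is replaced by its LoRA wrapper. `quantized_module` and `gptq_config` are hypothetical placeholders for an
# auto-gptq QuantLinear instance and its quantization config.
#
# new_module = dispatch_gptq(
#     quantized_module,
#     adapter_name="default",
#     r=8,
#     lora_alpha=16,
#     gptq_quantization_config=gptq_config,
# )
# if new_module is not None:
#     ...  # the caller swaps `quantized_module` for `new_module` in the model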