Dataset columns: repo_id (string, 15–89 chars) · file_path (string, 27–180 chars) · content (string, 1–2.23M chars) · __index_level_0__ (int64, 0–0)
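Each record below pairs a repository id and file path with the raw text of one PEFT example file. Purely as an illustration of that schema — the dataset id used here is a hypothetical placeholder, not the actual source of this table — such a dump could be inspected like this:

```python
# Hypothetical sketch of iterating a dump with the columns repo_id, file_path,
# content; the dataset id below is a placeholder, not the real source.
from datasets import load_dataset

ds = load_dataset("your-username/peft-examples-dump", split="train")
for row in ds.select(range(3)):
    print(row["repo_id"], row["file_path"], len(row["content"]))
```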
hf_public_repos/peft/examples
hf_public_repos/peft/examples/token_classification/requirements.txt
transformers accelerate evaluate tqdm datasets Pillow torchvision
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb
from huggingface_hub import notebook_login notebook_login()from datasets import load_dataset ds = load_dataset("scene_parse_150", split="train[:150]")ds = ds.train_test_split(test_size=0.1) train_ds = ds["train"] test_ds = ds["test"]import json from huggingface_hub import cached_download, hf_hub_url repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} num_labels = len(id2label)from transformers import AutoImageProcessor checkpoint = "nvidia/mit-b0" image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)from torchvision.transforms import ColorJitter jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)from PIL import Image import numpy as np def handle_grayscale_image(image): np_image = np.array(image) if np_image.ndim == 2: tiled_image = np.tile(np.expand_dims(np_image, -1), 3) return Image.fromarray(tiled_image) else: return Image.fromarray(np_image) def train_transforms(example_batch): images = [jitter(handle_grayscale_image(x)) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = image_processor(images, labels) return inputs def val_transforms(example_batch): images = [handle_grayscale_image(x) for x in example_batch["image"]] labels = [x for x in example_batch["annotation"]] inputs = image_processor(images, labels) return inputstrain_ds.set_transform(train_transforms) test_ds.set_transform(val_transforms)import torch from torch import nn import evaluate metric = evaluate.load("mean_iou") def compute_metrics(eval_pred): with torch.no_grad(): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() # currently using _compute instead of compute # see this issue for more info: https://github.com/huggingface/evaluate/pull/328#issuecomment-1286866576 metrics = metric._compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=image_processor.do_reduce_labels, ) # add per category metrics as individual key-value pairs per_category_accuracy = metrics.pop("per_category_accuracy").tolist() per_category_iou = metrics.pop("per_category_iou").tolist() metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) return metricsdef print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}" )from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer model = AutoModelForSemanticSegmentation.from_pretrained( checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True ) print_trainable_parameters(model)from peft import LoraConfig, get_peft_model config = LoraConfig( r=32, lora_alpha=32, target_modules=["query", "value"], lora_dropout=0.1, bias="lora_only", modules_to_save=["decode_head"], ) lora_model = get_peft_model(model, config) print_trainable_parameters(lora_model)for name, param in lora_model.named_parameters(): if param.requires_grad: print(name, param.shape)model_name = checkpoint.split("/")[-1] training_args = TrainingArguments( output_dir=f"{model_name}-scene-parse-150-lora", learning_rate=5e-4, num_train_epochs=50, per_device_train_batch_size=4, per_device_eval_batch_size=2, save_total_limit=3, evaluation_strategy="epoch", save_strategy="epoch", logging_steps=5, remove_unused_columns=False, push_to_hub=True, label_names=["labels"], ) trainer = Trainer( model=lora_model, args=training_args, train_dataset=train_ds, eval_dataset=test_ds, compute_metrics=compute_metrics, ) trainer.train()model_id = "segformer-scene-parse-150-lora" lora_model.save_pretrained(model_id)from peft import PeftConfig config = PeftConfig.from_pretrained(model_id) model = AutoModelForSemanticSegmentation.from_pretrained( checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True ) # Load the Lora model inference_model = PeftModel.from_pretrained(model, model_id)import requests url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" image = Image.open(requests.get(url, stream=True).raw) image# prepare image for the model encoding = image_processor(image.convert("RGB"), return_tensors="pt") print(encoding.pixel_values.shape)with torch.no_grad(): outputs = inference_model(pixel_values=encoding.pixel_values) logits = outputs.logits upsampled_logits = nn.functional.interpolate( logits, size=image.size[::-1], mode="bilinear", align_corners=False, ) pred_seg = upsampled_logits.argmax(dim=1)[0]def ade_palette(): """Creates a label colormap used in ADE20K segmentation benchmark. Returns: A colormap for visualizing segmentation results. 
""" return np.asarray( [ [0, 0, 0], [120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255], ] )import matplotlib.pyplot as plt color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8) palette = np.array(ade_palette()) for label, color in enumerate(palette): color_seg[pred_seg == label, :] = color color_seg = color_seg[..., ::-1] # convert to BGR img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map img = img.astype(np.uint8) plt.figure(figsize=(15, 10)) plt.imshow(img) plt.show()
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/semantic_segmentation/README.md
# Fine-tuning for semantic segmentation using LoRA and 🤗 PEFT [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb) We provide a notebook (`semantic_segmentation_peft_lora.ipynb`) where we learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune a semantic segmentation model by ONLY using **14%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685).
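To make the "update matrices" idea concrete, here is a minimal sketch — with assumed shapes, not code from the notebook — of how a frozen weight, a low-rank update, and the inference-time merge relate (the rank and alpha mirror the `r=32`, `lora_alpha=32` used in the notebook):

```python
# Minimal LoRA sketch (illustrative shapes, not the notebook's code): a frozen
# weight W gets a trainable low-rank update B @ A, scaled by alpha / r.
import torch

d_out, d_in, r, alpha = 768, 768, 32, 32
W = torch.randn(d_out, d_in)      # frozen pretrained weight of an attention projection
A = torch.randn(r, d_in) * 0.01   # trainable low-rank factor
B = torch.zeros(d_out, r)         # starts at zero, so the initial update is zero
x = torch.randn(4, d_in)

# During fine-tuning only A and B receive gradients; W stays frozen.
y = x @ (W + (alpha / r) * (B @ A)).T

# At inference the update can be merged into W once, adding no extra latency.
W_merged = W + (alpha / r) * (B @ A)
assert torch.allclose(x @ W_merged.T, y)
```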
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/image_classification/image_classification_timm_peft_lora.ipynb
import timm import torch from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transformimport peft from datasets import load_datasettorch.manual_seed(0)model_id_timm = "timm/poolformer_m36.sail_in1k"model = timm.create_model(model_id_timm, pretrained=True, num_classes=3)transform = create_transform(**resolve_data_config(model.pretrained_cfg, model=model))ds = load_dataset('beans')ds_train = ds["train"] ds_valid = ds["validation"]ds_train[0]['image']def process(batch): x = torch.cat([transform(img).unsqueeze(0) for img in batch['image']]) y = torch.tensor(batch['labels']) return {"x": x, "y": y}ds_train.set_transform(process) ds_valid.set_transform(process)train_loader = torch.utils.data.DataLoader(ds_train, batch_size=32) valid_loader = torch.utils.data.DataLoader(ds_valid, batch_size=32)def train(model, optimizer, criterion, train_dataloader, valid_dataloader, epochs): for epoch in range(epochs): model.train() train_loss = 0 for batch in train_dataloader: xb, yb = batch["x"], batch["y"] xb, yb = xb.to(device), yb.to(device) outputs = model(xb) lsm = torch.nn.functional.log_softmax(outputs, dim=-1) loss = criterion(lsm, yb) train_loss += loss.detach().float() loss.backward() optimizer.step() optimizer.zero_grad() model.eval() valid_loss = 0 correct = 0 n_total = 0 for batch in valid_dataloader: xb, yb = batch["x"], batch["y"] xb, yb = xb.to(device), yb.to(device) with torch.no_grad(): outputs = model(xb) lsm = torch.nn.functional.log_softmax(outputs, dim=-1) loss = criterion(lsm, yb) valid_loss += loss.detach().float() correct += (outputs.argmax(-1) == yb).sum().item() n_total += len(yb) train_loss_total = (train_loss / len(train_dataloader)).item() valid_loss_total = (valid_loss / len(valid_dataloader)).item() valid_acc_total = correct / n_total print(f"{epoch=:<2} {train_loss_total=:.4f} {valid_loss_total=:.4f} {valid_acc_total=:.4f}")[(n, type(m)) for n, m in model.named_modules()][:30][(n, type(m)) for n, m in model.named_modules()][-5:]config = peft.LoraConfig( r=8, target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"] )device = 'cuda' if torch.cuda.is_available() else 'cpu' peft_model = peft.get_peft_model(model, config).to(device) optimizer = torch.optim.Adam(peft_model.parameters(), lr=2e-4) criterion = torch.nn.CrossEntropyLoss() peft_model.print_trainable_parameters()%time train(peft_model, optimizer, criterion, train_loader, valid_dataloader=valid_loader, epochs=10)user = "BenjaminB" # put your user name here model_name = "peft-lora-with-timm-model" model_id = f"{user}/{model_name}"peft_model.push_to_hub(model_id);base_model = timm.create_model(model_id_timm, pretrained=True, num_classes=3) loaded = peft.PeftModel.from_pretrained(base_model, model_id)x = ds_train[:1]['x'] y_peft = peft_model(x.to(device)) y_loaded = loaded(x) torch.allclose(y_peft.cpu(), y_loaded)from huggingface_hub import delete_repodelete_repo(model_id)
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/image_classification/README.md
# Fine-tuning for image classification using LoRA and 🤗 PEFT ## Vision Transformer model from transformers [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb) We provide a notebook (`image_classification_peft_lora.ipynb`) where we learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune an image classification model by ONLY using **0.7%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685). ## PoolFormer model from timm [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb) The notebook `image_classification_timm_peft_lora.ipynb` showcases fine-tuning an image classification model from the [timm](https://huggingface.co/docs/timm/index) library. Again, LoRA is used to reduce the number of trainable parameters to a fraction of the total.
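Since both notebooks train only the LoRA matrices, a trained adapter can be folded back into the base model for adapter-free inference. A hedged sketch of that merge step with 🤗 PEFT follows; the adapter repo id is a placeholder, not an artifact produced by these notebooks:

```python
# Hypothetical sketch: merging a trained LoRA adapter back into its base model.
# The adapter id below is a placeholder; any adapter trained as in the notebooks
# (including its modules_to_save classifier head) would be loaded the same way.
from transformers import AutoModelForImageClassification
from peft import PeftModel

base = AutoModelForImageClassification.from_pretrained("google/vit-base-patch16-224-in21k")
peft_model = PeftModel.from_pretrained(base, "your-username/vit-finetuned-lora-food101")

# merge_and_unload folds the low-rank updates into the original weights and
# returns a plain transformers model, so inference needs no PEFT-specific code.
merged = peft_model.merge_and_unload()
merged.save_pretrained("vit-food101-merged")
```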
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/image_classification/image_classification_peft_lora.ipynb
from huggingface_hub import notebook_login notebook_login()import transformers import accelerate import peftprint(f"Transformers version: {transformers.__version__}") print(f"Accelerate version: {accelerate.__version__}") print(f"PEFT version: {peft.__version__}")model_checkpoint = "google/vit-base-patch16-224-in21k" # pre-trained model from which to fine-tunefrom datasets import load_dataset dataset = load_dataset("food101", split="train[:5000]")labels = dataset.features["label"].names label2id, id2label = dict(), dict() for i, label in enumerate(labels): label2id[label] = i id2label[i] = label id2label[2]from transformers import AutoImageProcessor image_processor = AutoImageProcessor.from_pretrained(model_checkpoint) image_processorfrom torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std) train_transforms = Compose( [ RandomResizedCrop(image_processor.size["height"]), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(image_processor.size["height"]), CenterCrop(image_processor.size["height"]), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """Apply train_transforms across a batch.""" example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch def preprocess_val(example_batch): """Apply val_transforms across a batch.""" example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch# split up training into training + validation splits = dataset.train_test_split(test_size=0.1) train_ds = splits["train"] val_ds = splits["test"]train_ds.set_transform(preprocess_train) val_ds.set_transform(preprocess_val)def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}" )from transformers import AutoModelForImageClassification, TrainingArguments, Trainer model = AutoModelForImageClassification.from_pretrained( model_checkpoint, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ) print_trainable_parameters(model)from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=16, target_modules=["query", "value"], lora_dropout=0.1, bias="none", modules_to_save=["classifier"], ) lora_model = get_peft_model(model, config) print_trainable_parameters(lora_model)from transformers import TrainingArguments, Trainer model_name = model_checkpoint.split("/")[-1] batch_size = 128 args = TrainingArguments( f"{model_name}-finetuned-lora-food101", remove_unused_columns=False, evaluation_strategy="epoch", save_strategy="epoch", learning_rate=5e-3, per_device_train_batch_size=batch_size, gradient_accumulation_steps=4, per_device_eval_batch_size=batch_size, fp16=True, num_train_epochs=5, logging_steps=10, load_best_model_at_end=True, metric_for_best_model="accuracy", push_to_hub=True, label_names=["labels"], )import numpy as np import evaluate metric = evaluate.load("accuracy") # the compute_metrics function takes a Named Tuple as input: # predictions, which are the logits of the model as Numpy arrays, # and label_ids, which are the ground-truth labels as Numpy arrays. def compute_metrics(eval_pred): """Computes accuracy on a batch of predictions""" predictions = np.argmax(eval_pred.predictions, axis=1) return metric.compute(predictions=predictions, references=eval_pred.label_ids)import torch def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["label"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels}trainer = Trainer( model, args, train_dataset=train_ds, eval_dataset=val_ds, tokenizer=image_processor, compute_metrics=compute_metrics, data_collator=collate_fn, ) train_results = trainer.train()trainer.evaluate(val_ds)repo_name = f"sayakpaul/{model_name}-finetuned-lora-food101" lora_model.push_to_hub(repo_name)from peft import PeftConfig, PeftModel config = PeftConfig.from_pretrained(repo_name) model = model = AutoModelForImageClassification.from_pretrained( config.base_model_name_or_path, label2id=label2id, id2label=id2label, ignore_mismatched_sizes=True, # provide this in case you're planning to fine-tune an already fine-tuned checkpoint ) # Load the Lora model inference_model = PeftModel.from_pretrained(model, repo_name)from PIL import Image import requests url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/beignets.jpeg" image = Image.open(requests.get(url, stream=True).raw) imageimage_processor = AutoImageProcessor.from_pretrained(repo_name)# prepare image for the model encoding = image_processor(image.convert("RGB"), return_tensors="pt") print(encoding.pixel_values.shape)import torch # forward pass with torch.no_grad(): outputs = inference_model(**encoding) logits = outputs.logits predicted_class_idx = logits.argmax(-1).item() print("Predicted class:", inference_model.config.id2label[predicted_class_idx])
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb
import os import torch import torch.nn as nn import bitsandbytes as bnb from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b")from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model)def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" )from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) model = get_peft_model(model, config) print_trainable_parameters(model)import transformers from datasets import load_dataset data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=100, max_steps=200, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train()from huggingface_hub import notebook_login notebook_login()model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True)import torch from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer peft_model_id = "ybelkada/opt-6.7b-lora" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained( config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Lora model model = PeftModel.from_pretrained(model, peft_model_id)batch = tokenizer("Two things are infinite: ", return_tensors="pt") with torch.cuda.amp.autocast(): output_tokens = model.generate(**batch, max_new_tokens=50) print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True))
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/int8_training/peft_adalora_whisper_large_training.py
import argparse import gc import json import logging import math import os from dataclasses import dataclass from datetime import datetime from pathlib import Path from random import randint from typing import Any, Dict, List, Union # datasets imports import datasets # metric imports import evaluate import numpy as np import torch import transformers import wandb # accelerate imports from accelerate import Accelerator, dispatch_model from accelerate.logging import get_logger from datasets import Audio, DatasetDict, IterableDatasetDict, interleave_datasets, load_dataset # hf imports from huggingface_hub import Repository from torch.utils.data import DataLoader from tqdm import tqdm from transformers import ( SchedulerType, WhisperForConditionalGeneration, WhisperProcessor, get_scheduler, set_seed, ) from transformers.models.whisper.english_normalizer import BasicTextNormalizer from transformers.utils import get_full_repo_name # peft imports from peft import AdaLoraConfig, LoraConfig, PeftModel, get_peft_model logger = get_logger(__name__, log_level="INFO") def parse_args(): parser = argparse.ArgumentParser(description="Whisper Fine-Tuning with AdaLora") parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument("--language", type=str, help="Language to use for training; e.g., 'Hindi' ", required=True) parser.add_argument("--language_abbr", type=str, help="Language to use for training; e.g., 'hi' ", required=True) parser.add_argument( "--task", type=str, default="transcribe", help="Task to use for training; e.g., 'transcribe' ", required=False ) parser.add_argument( "--dataset_name", type=str, default="mozilla-foundation/common_voice_11_0", help="Dataset to use for training; e.g., 'whisper' ", required=False, ) parser.add_argument( "--dataset_in_streaming_mode", action="store_true", help="Whether to use streaming mode for the dataset.", ) parser.add_argument( "--do_lower_case", action="store_true", help="lowercase the transcribed text before tokenizing" ) parser.add_argument( "--do_remove_punctuation", action="store_true", help="remove punctuation from the transcribed text" ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--max_audio_input_length", type=float, default=30.0, help="Maximum audio length in seconds.") parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--buffer_size", type=int, default=5000, help="Number of samples to prefetch in the streaming mode.", ) parser.add_argument( "--dataloader_pin_memory", action="store_true", help="Whether or not to pin memory for the DataLoader.", ) parser.add_argument( "--dataloader_num_workers", type=int, default=0, help="Number of subprocesses to use for data loading.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) 
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--load_best_model", action="store_true", help="Whether to load the best model at the end of training", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument( "--checkpointing_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--logging_steps", type=int, default=100, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--evaluation_steps", type=int, default=500, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) # lora/adalora specific args parser.add_argument( "--use_peft", action="store_true", help="Whether to use PEFT", ) parser.add_argument( "--use_adalora", action="store_true", help="Whether to use AdaLoRA or LoRA. 
If set, uses AdaLoRA instead of the default LoRA.", ) parser.add_argument( "--init_r", type=int, default=12, help="Initial AdaLoRA rank", ) parser.add_argument( "--target_r", type=int, default=4, help="Target AdaLoRA rank", ) parser.add_argument( "--tinit", type=int, default=200, help="number of warmup steps for AdaLoRA wherein no pruning is performed", ) parser.add_argument( "--tfinal", type=int, default=1000, help=" fix the resulting budget distribution and fine-tune the model for tfinal steps when using AdaLoRA ", ) parser.add_argument( "--delta_t", type=int, default=10, help="interval of steps for AdaLoRA to update rank", ) parser.add_argument( "--lora_alpha", type=int, default=32, help="LORA alpha", ) parser.add_argument( "--r", type=int, default=8, help="LORA rank", ) parser.add_argument( "--lora_dropout", type=float, default=0.1, help="LORA dropout", ) parser.add_argument( "--orth_reg_weight", type=float, default=0.5, help="Orthogonal regularization weight", ) parser.add_argument( "--debug_mode", action="store_true", help="Whether to use debug mode", ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args def load_streaming_dataset(dataset_name, dataset_config_name, split, **kwargs): if "+" in split: # load multiple splits separated by the `+` symbol *with* streaming mode dataset_splits = [ load_dataset(dataset_name, dataset_config_name, split=split_name, streaming=True, **kwargs) for split_name in split.split("+") ] # interleave multiple splits to form one dataset interleaved_dataset = interleave_datasets(dataset_splits) return interleaved_dataset else: # load a single split *with* streaming mode dataset = load_dataset(dataset_name, dataset_config_name, split=split, streaming=True, **kwargs) return dataset def prepare_dataset_wrapper(do_lower_case, do_remove_punctuation, processor, normalizer): def prepare_dataset(batch): # load and (possibly) resample audio data to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = processor.feature_extractor( audio["array"], sampling_rate=audio["sampling_rate"] ).input_features[0] # compute input length of audio sample in seconds batch["input_length"] = len(audio["array"]) / audio["sampling_rate"] # optional pre-processing steps transcription = batch["sentence"] if do_lower_case: transcription = transcription.lower() if do_remove_punctuation: transcription = normalizer(transcription).strip() # encode target text to label ids batch["labels"] = processor.tokenizer(transcription).input_ids return batch return prepare_dataset def save_model_hook(models, weights, output_dir): for model in models: model.save_pretrained(output_dir) # make sure to pop weight so that corresponding model is not saved again weights.pop() def load_model_hook(models, input_dir): while len(models) > 0: model = models.pop() # pop models so that they are not loaded again PeftModel.from_pretrained(model.base_model.model, input_dir) @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = 
self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch def get_audio_length_processor(max_input_length): def is_audio_in_length_range(length): return length < max_input_length return is_audio_in_length_range def evaluation_loop(model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator): model.eval() predictions = [] references = [] normalized_predictions = [] normalized_references = [] for _, batch in enumerate(tqdm(eval_dataloader)): with torch.cuda.amp.autocast(): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"], forced_decoder_ids=forced_decoder_ids, max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, processor.tokenizer.pad_token_id) decoded_preds = processor.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = processor.tokenizer.batch_decode(labels, skip_special_tokens=True) predictions.extend(decoded_preds) references.extend(decoded_labels) normalized_predictions.extend([normalizer(pred).strip() for pred in decoded_preds]) normalized_references.extend([normalizer(label).strip() for label in decoded_labels]) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute(predictions=predictions, references=references) normalized_wer = 100 * metric.compute(predictions=normalized_predictions, references=normalized_references) eval_metrics = {"eval/wer": wer, "eval/normalized_wer": normalized_wer} if accelerator.get_tracker("wandb"): sample_size = min(len(predictions), 256) ids = [randint(0, len(predictions) - 1) for p in range(0, sample_size)] sample_predictions = [predictions[i] for i in ids] sample_references = [references[i] for i in ids] sample_normalized_predictions = [normalized_predictions[i] for i in ids] sample_normalized_references = [normalized_references[i] for i in ids] table_rows = [ list(r) for r in zip( sample_predictions, sample_references, sample_normalized_predictions, sample_normalized_references ) ] eval_metrics["eval_samples"] = wandb.Table( columns=["predictions", "references", "normalized_predictions", "normalized_references"], rows=table_rows, ) return eval_metrics def main(): args = parse_args() accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps} if args.with_tracking: accelerator_kwargs["log_with"] = args.report_to accelerator_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(**accelerator_kwargs) # Make one log on every process with the configuration for debugging. 
logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # load dataset either in streaming mode or not processor = WhisperProcessor.from_pretrained(args.model_name_or_path, language=args.language, task=args.task) normalizer = BasicTextNormalizer() prepare_dataset = prepare_dataset_wrapper(args.do_lower_case, args.do_remove_punctuation, processor, normalizer) is_audio_in_length_range = get_audio_length_processor(args.max_audio_input_length) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor) if args.dataset_in_streaming_mode: raw_datasets = IterableDatasetDict() loading_method = load_streaming_dataset else: raw_datasets = DatasetDict() loading_method = load_dataset if args.debug_mode: train_split = "train[:100]" test_split = "test[:10]" else: train_split = "train+validation" test_split = "test" raw_datasets["train"] = loading_method( args.dataset_name, args.language_abbr, split=train_split, use_auth_token=True ) raw_datasets["test"] = loading_method(args.dataset_name, args.language_abbr, split=test_split, use_auth_token=True) raw_datasets = raw_datasets.cast_column("audio", Audio(sampling_rate=16000)) logger.info("Dataset loaded: %s", raw_datasets) logger.info(f'{raw_datasets["train"][0]}') vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=list(next(iter(raw_datasets.values())).features), num_proc=args.preprocessing_num_workers, ).with_format("torch") if args.dataset_in_streaming_mode: vectorized_datasets["train"] = vectorized_datasets["train"].shuffle( buffer_size=args.buffer_size, seed=args.seed, ) # filter out audio files that are too long from the training set is_audio_in_length_range = get_audio_length_processor(args.max_audio_input_length) vectorized_datasets["train"] = vectorized_datasets["train"].filter( is_audio_in_length_range, input_columns=["input_length"] ) # get dataloaders train_dataloader = DataLoader( vectorized_datasets["train"], batch_size=args.per_device_train_batch_size, shuffle=True, collate_fn=data_collator, num_workers=args.dataloader_num_workers, pin_memory=args.dataloader_pin_memory, ) eval_dataloader = DataLoader( vectorized_datasets["test"], batch_size=args.per_device_eval_batch_size, collate_fn=data_collator, num_workers=args.dataloader_num_workers, pin_memory=args.dataloader_pin_memory, ) # metric metric = evaluate.load("wer") # model model = WhisperForConditionalGeneration.from_pretrained(args.model_name_or_path, load_in_8bit=True) model.config.forced_decoder_ids = None 
model.config.suppress_tokens = [] if len(set(model.hf_device_map.values()).intersection({"cpu", "disk"})) > 0: raise ValueError("Training on CPU or disk is not supported.") if len(set(model.hf_device_map.values())) > 1: device_map = model.hf_device_map.copy() # required because `labels` are on main execution device (0) while the output of `proj_out` is on other device. # So, this leads to device mismatch error when calculation cross-entropy between logits and labels. # Won't arise during inference as `labels` aren't supplied during that time # instead of changing device of one of the tied modules, I have to do this for all tied modules # else the execution device of remaining tied modules isn't changed device_map["model.decoder.embed_tokens"] = model._hf_hook.execution_device device_map["model.decoder.embed_positions"] = model._hf_hook.execution_device device_map["proj_out"] = model._hf_hook.execution_device dispatch_model(model, device_map=device_map) # preparing peft model if args.use_peft: from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model) # as Whisper model uses Conv layer in encoder, checkpointing disables grad computation # to avoid this, make the inputs trainable def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.model.encoder.conv1.register_forward_hook(make_inputs_require_grad) # wrapping model with adalora tuner if args.use_adalora: config = AdaLoraConfig( init_r=args.init_r, target_r=args.target_r, beta1=0.85, beta2=0.85, tinit=args.tinit, tfinal=args.tfinal, deltaT=args.delta_t, lora_alpha=args.lora_alpha, lora_dropout=args.lora_dropout, target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], orth_reg_weight=args.orth_reg_weight, ) else: config = LoraConfig( r=args.r, lora_alpha=args.lora_alpha, target_modules=["q_proj", "v_proj"], lora_dropout=args.lora_dropout, ) model = get_peft_model(model, config) model.print_trainable_parameters() # optimizer optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay) if args.max_train_steps is None: num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # scheduler lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) accelerator.print(model) # Note here that the max steps is adjusted by the accelerator's num_processes args.max_train_steps = math.ceil(args.max_train_steps / accelerator.num_processes) if args.use_peft and args.use_adalora: model.base_model.peft_config["default"].total_step = args.max_train_steps # model.base_model.peft_config.total_step = args.max_train_steps # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. 
if args.with_tracking: run_name = f"run-{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}" experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers( "Whisper PEFT Fine-Tuning", config=experiment_config, init_kwargs={"wandb": {"name": run_name}} ) # saving and loading checkpoints for resuming training accelerator.register_save_state_pre_hook(save_model_hook) accelerator.register_load_state_pre_hook(load_model_hook) total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) global_step = 0 starting_epoch = 0 best_metric = None resume_step = 0 forced_decoder_ids = processor.get_decoder_prompt_ids(language=args.language, task=args.task) # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) training_difference = os.path.splitext(path)[0] global_step = resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) # We need to adjust the progress bar to the current step progress_bar.update(resume_step) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 running_loss = 0 for step, batch in enumerate(accelerator.skip_first_batches(train_dataloader, num_batches=resume_step)): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() # Update the importance of low-rank matrices # and allocate the budget accordingly. # This is only needed for AdaLora. # Note that this requires parameter gradients. # Hence being called before optimizer.zero_grad(). 
if args.use_peft and args.use_adalora: model.update_and_allocate(global_step) optimizer.zero_grad() global_step += 1 progress_bar.update(1) if args.with_tracking: step_loss = accelerator.reduce(loss.detach().clone()).item() total_loss += step_loss running_loss += step_loss if global_step % args.checkpointing_steps == 0: output_dir = os.path.join(args.output_dir, f"step_{global_step}") accelerator.save_state(output_dir) if global_step % args.logging_steps == 0: if args.with_tracking: accelerator.log({"train/running_loss": running_loss / args.logging_steps}, step=global_step) running_loss = 0 if global_step % args.evaluation_steps == 0: eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: logger.info(f"Step {global_step} eval metrics: {eval_metrics}") accelerator.log(eval_metrics, step=global_step) if best_metric is None or eval_metrics["eval/wer"] < best_metric: best_metric = eval_metrics["eval/wer"] accelerator.save_state(os.path.join(args.output_dir, "best_checkpoint")) model.train() if global_step >= args.max_train_steps: break if args.with_tracking: train_epoch_loss = total_loss / (step + 1) logger.info(f"Epoch {epoch} train loss: {train_epoch_loss}") accelerator.log({"epoch/train_loss": train_epoch_loss}, step=epoch) if args.push_to_hub and epoch <= args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process) # evaluate the model at the end of training eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: logger.info(f"Step {global_step} eval metrics: {eval_metrics}") accelerator.log(eval_metrics, step=global_step) if best_metric is None or eval_metrics["eval/wer"] < best_metric: best_metric = eval_metrics["eval/wer"] accelerator.save_state(os.path.join(args.output_dir, "best_checkpoint")) if accelerator.is_main_process: processor.tokenizer.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.load_best_model: # load the best model accelerator.load_state(os.path.join(args.output_dir, "best_checkpoint")) model.resize_modules_by_rank_pattern(model.peft_config["default"].rank_pattern, "default") eval_metrics = evaluation_loop( model, eval_dataloader, processor, normalizer, metric, forced_decoder_ids, accelerator ) if args.with_tracking: best_metrics = {"best_" + k: v for k, v in eval_metrics.items()} accelerator.log(best_metrics, step=global_step) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(args.output_dir, is_main_process=accelerator.is_main_process) if accelerator.is_main_process: processor.tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: eval_metrics.pop("eval_samples") json.dump(eval_metrics, f) if __name__ == "__main__": main()
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/int8_training/fine_tune_blip2_int8.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import load_dataset from torch.utils.data import DataLoader, Dataset from transformers import AutoModelForVision2Seq, AutoProcessor from peft import LoraConfig, get_peft_model # Let's define the LoraConfig config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", ) # We load our model and processor using `transformers` model = AutoModelForVision2Seq.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True) processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") # Get our peft model and print the number of trainable parameters model = get_peft_model(model, config) model.print_trainable_parameters() # Let's load the dataset here! dataset = load_dataset("ybelkada/football-dataset", split="train") class ImageCaptioningDataset(Dataset): def __init__(self, dataset, processor): self.dataset = dataset self.processor = processor def __len__(self): return len(self.dataset) def __getitem__(self, idx): item = self.dataset[idx] encoding = self.processor(images=item["image"], padding="max_length", return_tensors="pt") # remove batch dimension encoding = {k: v.squeeze() for k, v in encoding.items()} encoding["text"] = item["text"] return encoding def collator(batch): # pad the input_ids and attention_mask processed_batch = {} for key in batch[0].keys(): if key != "text": processed_batch[key] = torch.stack([example[key] for example in batch]) else: text_inputs = processor.tokenizer( [example["text"] for example in batch], padding=True, return_tensors="pt" ) processed_batch["input_ids"] = text_inputs["input_ids"] processed_batch["attention_mask"] = text_inputs["attention_mask"] return processed_batch train_dataset = ImageCaptioningDataset(dataset, processor) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=collator) optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5) device = "cuda" if torch.cuda.is_available() else "cpu" model.train() for epoch in range(50): print("Epoch:", epoch) for idx, batch in enumerate(train_dataloader): input_ids = batch.pop("input_ids").to(device) pixel_values = batch.pop("pixel_values").to(device, torch.float16) outputs = model(input_ids=input_ids, pixel_values=pixel_values, labels=input_ids) loss = outputs.loss print("Loss:", loss.item()) loss.backward() optimizer.step() optimizer.zero_grad() if idx % 10 == 0: generated_output = model.generate(pixel_values=pixel_values) print(processor.batch_decode(generated_output, skip_special_tokens=True))
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb
from huggingface_hub import notebook_login notebook_login()# Select CUDA device index import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" model_name_or_path = "openai/whisper-large-v2" language = "Marathi" language_abbr = "mr" task = "transcribe" dataset_name = "mozilla-foundation/common_voice_11_0"from datasets import load_dataset, DatasetDict common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, language_abbr, split="train+validation", use_auth_token=True) common_voice["test"] = load_dataset(dataset_name, language_abbr, split="test", use_auth_token=True) print(common_voice)common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) print(common_voice)from transformers import WhisperFeatureExtractor feature_extractor = WhisperFeatureExtractor.from_pretrained(model_name_or_path)from transformers import WhisperTokenizer tokenizer = WhisperTokenizer.from_pretrained(model_name_or_path, language=language, task=task)from transformers import WhisperProcessor processor = WhisperProcessor.from_pretrained(model_name_or_path, language=language, task=task)print(common_voice["train"][0])from datasets import Audio common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000))print(common_voice["train"][0])def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor(audio["array"], sampling_rate=audio["sampling_rate"]).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batchcommon_voice = common_voice.map(prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2)common_voice["train"]import torch from dataclasses import dataclass from typing import Any, Dict, List, Union @dataclass class DataCollatorSpeechSeq2SeqWithPadding: processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batchdata_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)import evaluate metric = evaluate.load("wer")def compute_metrics(pred): pred_ids = pred.predictions label_ids = pred.label_ids # replace -100 with the pad_token_id label_ids[label_ids == -100] = tokenizer.pad_token_id # we do not want to group tokens when computing the metrics pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True) wer = 
100 * metric.compute(predictions=pred_str, references=label_str) return {"wer": wer}from transformers import WhisperForConditionalGeneration model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path, load_in_8bit=True) # model.hf_device_map - this should be {" ": 0}model.config.forced_decoder_ids = None model.config.suppress_tokens = []from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model)from peft import LoraConfig, PeftModel, LoraModel, LoraConfig, get_peft_model config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") model = get_peft_model(model, config) model.print_trainable_parameters()from transformers import Seq2SeqTrainingArguments training_args = Seq2SeqTrainingArguments( output_dir="temp", # change to a repo name of your choice per_device_train_batch_size=8, gradient_accumulation_steps=1, # increase by 2x for every 2x decrease in batch size learning_rate=1e-3, warmup_steps=50, num_train_epochs=3, evaluation_strategy="epoch", fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, # required as the PeftModel forward doesn't have the signature of the wrapped model's forward label_names=["labels"], # same reason as above )from transformers import Seq2SeqTrainer, TrainerCallback, TrainingArguments, TrainerState, TrainerControl from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR class SavePeftModelCallback(TrainerCallback): def on_save( self, args: TrainingArguments, state: TrainerState, control: TrainerControl, **kwargs, ): checkpoint_folder = os.path.join(args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{state.global_step}") peft_model_path = os.path.join(checkpoint_folder, "adapter_model") kwargs["model"].save_pretrained(peft_model_path) pytorch_model_path = os.path.join(checkpoint_folder, "pytorch_model.bin") if os.path.exists(pytorch_model_path): os.remove(pytorch_model_path) return control trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], eval_dataset=common_voice["test"], data_collator=data_collator, # compute_metrics=compute_metrics, tokenizer=processor.feature_extractor, callbacks=[SavePeftModelCallback], ) model.config.use_cache = False # silence the warnings. 
Please re-enable for inference!trainer.train()model_name_or_path = "openai/whisper-large-v2" peft_model_id = "smangrul/" + f"{model_name_or_path}-{model.peft_config.peft_type}-colab".replace("/", "-") model.push_to_hub(peft_model_id) print(peft_model_id)from peft import PeftModel, PeftConfig from transformers import WhisperForConditionalGeneration, Seq2SeqTrainer peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id)from torch.utils.data import DataLoader from tqdm import tqdm import numpy as np import gc eval_dataloader = DataLoader(common_voice["test"], batch_size=8, collate_fn=data_collator) model.eval() for step, batch in enumerate(tqdm(eval_dataloader)): with torch.cuda.amp.autocast(): with torch.no_grad(): generated_tokens = ( model.generate( input_features=batch["input_features"].to("cuda"), decoder_input_ids=batch["labels"][:, :4].to("cuda"), max_new_tokens=255, ) .cpu() .numpy() ) labels = batch["labels"].cpu().numpy() labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) metric.add_batch( predictions=decoded_preds, references=decoded_labels, ) del generated_tokens, labels, batch gc.collect() wer = 100 * metric.compute() print(f"{wer=}")import torch import gradio as gr from transformers import ( AutomaticSpeechRecognitionPipeline, WhisperForConditionalGeneration, WhisperTokenizer, WhisperProcessor, ) from peft import PeftModel, PeftConfig peft_model_id = "smangrul/openai-whisper-large-v2-LORA-colab" language = "Marathi" task = "transcribe" peft_config = PeftConfig.from_pretrained(peft_model_id) model = WhisperForConditionalGeneration.from_pretrained( peft_config.base_model_name_or_path, load_in_8bit=True, device_map="auto" ) model = PeftModel.from_pretrained(model, peft_model_id) tokenizer = WhisperTokenizer.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task) feature_extractor = processor.feature_extractor forced_decoder_ids = processor.get_decoder_prompt_ids(language=language, task=task) pipe = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) def transcribe(audio): with torch.cuda.amp.autocast(): text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"] return text iface = gr.Interface( fn=transcribe, inputs=gr.Audio(source="microphone", type="filepath"), outputs="text", title="PEFT LoRA + INT8 Whisper Large V2 Marathi", description="Realtime demo for Marathi speech recognition using `PEFT-LoRA+INT8` fine-tuned Whisper Large V2 model.", ) iface.launch(share=True)
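The evaluation loop above streams batches into the `wer` metric and only calls `metric.compute()` at the end. To sanity-check that metric call in isolation, a minimal sketch using the `evaluate` library on toy strings is shown below (the sentences are placeholders, not Common Voice data):

```python
import evaluate

# Minimal sanity check of the WER metric used in the evaluation loop above.
wer_metric = evaluate.load("wer")
predictions = ["hello world", "this is a test sentence"]
references = ["hello world", "this is a test phrase"]
wer = 100 * wer_metric.compute(predictions=predictions, references=references)
print(f"toy WER: {wer:.2f}")  # word error rate in percent, lower is better
```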
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb
# Select CUDA device index import os import torch os.environ["CUDA_VISIBLE_DEVICES"] = "0" from datasets import load_dataset from transformers import AutoModelForSeq2SeqLM, AutoTokenizer model_name = "google/flan-t5-large" model = AutoModelForSeq2SeqLM.from_pretrained(model_name, load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained(model_name)from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model)from peft import LoraConfig, get_peft_model, TaskType def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) model = get_peft_model(model, lora_config) print_trainable_parameters(model)# loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, )# data preprocessing text_column = "sentence" label_column = "text_label" max_length = 128 def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"]from transformers import TrainingArguments, Trainer training_args = TrainingArguments( "temp", evaluation_strategy="epoch", learning_rate=1e-3, gradient_accumulation_steps=1, auto_find_batch_size=True, num_train_epochs=1, save_steps=100, save_total_limit=8, ) trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, ) model.config.use_cache = False # silence the warnings. Please re-enable for inference!trainer.train()model.eval() input_text = "In January-September 2009 , the Group 's net interest income increased to EUR 112.4 mn from EUR 74.3 mn in January-September 2008 ." 
inputs = tokenizer(input_text, return_tensors="pt") outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print("input sentence: ", input_text) print(" output prediction: ", tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))from huggingface_hub import notebook_login notebook_login()model.push_to_hub("ybelkada/flan-t5-large-financial-phrasebank-lora", use_auth_token=True)import torch from peft import PeftModel, PeftConfig from transformers import AutoModelForSeq2SeqLM, AutoTokenizer peft_model_id = "ybelkada/flan-t5-large-financial-phrasebank-lora" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, torch_dtype="auto", device_map="auto") tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Lora model model = PeftModel.from_pretrained(model, peft_model_id)model.eval() input_text = "In January-September 2009 , the Group 's net interest income increased to EUR 112.4 mn from EUR 74.3 mn in January-September 2008 ." inputs = tokenizer(input_text, return_tensors="pt") outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print("input sentence: ", input_text) print(" output prediction: ", tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
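The LoRA config above targets the `"q"` and `"v"` projections inside the T5 attention blocks. To confirm which module names those patterns match before training, a small inspection loop like the following can help (a sketch; it assumes `model` is the loaded `flan-t5-large` base model from the notebook):

```python
# List the modules whose names end in ".q" or ".v" -- these are the layers
# that target_modules=["q", "v"] wraps with LoRA adapters.
matched = [name for name, _ in model.named_modules() if name.endswith((".q", ".v"))]
print(f"{len(matched)} modules matched, e.g.:")
for name in matched[:4]:
    print(" ", name)
```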
0
hf_public_repos/peft/examples
hf_public_repos/peft/examples/int8_training/run_adalora_whisper_int8.sh
accelerate launch --config_file config.yaml peft_adalora_whisper_large_training.py \ --model_name_or_path "openai/whisper-large-v2" \ --language "Marathi" \ --language_abbr "mr" \ --task "transcribe" \ --dataset_name "mozilla-foundation/common_voice_11_0" \ --push_to_hub \ --preprocessing_num_workers 2 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --dataloader_pin_memory \ --dataloader_num_workers 2 \ --learning_rate 1e-3 \ --weight_decay 1e-4 \ --num_train_epochs 3 \ --gradient_accumulation_steps 1 \ --lr_scheduler_type "linear" \ --num_warmup_steps 50 \ --output_dir "adalora_whisper_large_marathi_multi_adapter" \ --seed 42 \ --load_best_model \ --with_tracking \ --report_to "wandb" \ --hub_token $HUB_TOKEN \ --checkpointing_steps 2000 \ --evaluation_steps 2000 \ --logging_steps 25 \ --use_peft \ --use_adalora \ --init_r 12 \ --target_r 8 \ --tinit 100 \ --tfinal 800 \ --delta_t 10 \ --lora_alpha 32 \ --lora_dropout 0.1 \ --orth_reg_weight 0.5
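The AdaLoRA-specific flags above (`--init_r`, `--target_r`, `--tinit`, `--tfinal`, `--delta_t`, `--orth_reg_weight`) map onto fields of PEFT's `AdaLoraConfig`. A rough Python equivalent is sketched below (not taken from the training script itself; the `target_modules` choice is an assumption mirroring the Whisper LoRA example):

```python
from peft import AdaLoraConfig

# Approximate Python counterpart of the AdaLoRA flags passed to the script above.
adalora_config = AdaLoraConfig(
    init_r=12,            # --init_r: initial rank of each incremental matrix
    target_r=8,           # --target_r: average target rank after budget pruning
    tinit=100,            # --tinit: warmup steps before rank pruning starts
    tfinal=800,           # --tfinal: final fine-tuning steps after pruning ends
    deltaT=10,            # --delta_t: interval (in steps) between budget allocations
    lora_alpha=32,        # --lora_alpha
    lora_dropout=0.1,     # --lora_dropout
    orth_reg_weight=0.5,  # --orth_reg_weight: orthogonality regularization strength
    target_modules=["q_proj", "v_proj"],  # assumption: same attention projections as the LoRA example
)
```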
0
hf_public_repos/peft
hf_public_repos/peft/scripts/stale.py
# Copyright 2023 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt from datetime import timezone from github import Github LABELS_TO_EXEMPT = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", "PRs welcome to address this", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/peft") open_issues = repo.get_issues(state="open") for issue in open_issues: comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.now(timezone.utc) - issue.updated_at).days > 7 and (dt.now(timezone.utc) - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): issue.edit(state="closed") elif ( (dt.now(timezone.utc) - issue.updated_at).days > 23 and (dt.now(timezone.utc) - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\n" ) if __name__ == "__main__": main()
0
hf_public_repos/peft
hf_public_repos/peft/scripts/log_reports.py
import json, os from pathlib import Path from datetime import date from tabulate import tabulate failed = [] passed = [] group_info = [] total_num_failed = 0 empty_file = False or len(list(Path().glob("*.log"))) == 0 total_empty_files = [] for log in Path().glob("*.log"): section_num_failed = 0 i = 0 with open(log, "r") as f: for line in f: line = json.loads(line) i += 1 if line.get("nodeid", "") != "": test = line["nodeid"] if line.get("duration", None) is not None: duration = f'{line["duration"]:.4f}' if line.get("outcome", "") == "failed": section_num_failed += 1 failed.append([test, duration, log.name.split('_')[0]]) total_num_failed += 1 else: passed.append([test, duration, log.name.split('_')[0]]) empty_file = i == 0 group_info.append([str(log), section_num_failed, failed]) total_empty_files.append(empty_file) os.remove(log) failed = [] no_error_payload = { "type": "section", "text": { "type": "plain_text", "text": "🌞 There were no failures!" if not any(total_empty_files) else "Something went wrong there is at least one empty file - please check GH action results.", "emoji": True } } message = "" payload = [ { "type": "header", "text": { "type": "plain_text", "text": "🤗 Results of the {} PEFT scheduled tests.".format(os.environ.get("TEST_TYPE", "")), } }, ] if total_num_failed > 0: for i, (name, num_failed, failed_tests) in enumerate(group_info): if num_failed > 0: if num_failed == 1: message += f"*{name}: {num_failed} failed test*\n" else: message += f"*{name}: {num_failed} failed tests*\n" failed_table = [] for test in failed_tests: failed_table.append(test[0].split("::")) failed_table = tabulate(failed_table, headers=["Test Location", "Test Case", "Test Name"], showindex="always", tablefmt="grid", maxcolwidths=[12, 12, 12]) message += '\n```\n' +failed_table + '\n```' if total_empty_files[i]: message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n" print(f'### {message}') else: payload.append(no_error_payload) if os.environ.get("TEST_TYPE", "") != "": from slack_sdk import WebClient if len(message) != 0: md_report = { "type": "section", "text": { "type": "mrkdwn", "text": message }, } payload.append(md_report) action_button = { "type": "section", "text": { "type": "mrkdwn", "text": "*For more details:*" }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/peft/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } payload.append(action_button) date_report = { "type": "context", "elements": [ { "type": "plain_text", "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}", }, ], } payload.append(date_report) print(payload) client = WebClient(token=os.environ.get("SLACK_API_TOKEN")) client.chat_postMessage(channel="#peft-ci-daily", text=message, blocks=payload)
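The script above expects one `*.log` file per test group, where each line is a JSON object carrying at least `nodeid`, `duration`, and `outcome` keys (the shape emitted by a pytest report-log style plugin). A sketch of that input shape follows; the test names, timings, and file name are illustrative only:

```python
import json

# Two illustrative report lines in the shape log_reports.py consumes:
# it reads "nodeid", "duration" and "outcome" from each JSON object per line.
sample_lines = [
    {"nodeid": "tests/test_example.py::test_lora_forward", "duration": 0.0132, "outcome": "passed"},
    {"nodeid": "tests/test_example.py::test_lora_merge", "duration": 0.2547, "outcome": "failed"},
]
with open("single_gpu_example.log", "w") as f:  # hypothetical file name; the prefix before "_" becomes the group label
    for line in sample_lines:
        f.write(json.dumps(line) + "\n")
```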
0
hf_public_repos/peft
hf_public_repos/peft/scripts/launch_notebook_mp.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This is a minimal example of launching PEFT with Accelerate. This used to cause issues because PEFT would eagerly # import bitsandbytes, which initializes CUDA, resulting in: # > RuntimeError: Cannot re-initialize CUDA in forked subprocess. To use CUDA with multiprocessing, you must use the # > 'spawn' start method # This script exists to ensure that this issue does not reoccur. import torch import peft from accelerate import notebook_launcher def init(): class MyModule(torch.nn.Module): def __init__(self): super().__init__() self.linear = torch.nn.Linear(1, 2) def forward(self, x): return self.linear(x) model = MyModule().to("cuda") peft.get_peft_model(model, peft.LoraConfig(target_modules=["linear"])) def main(): notebook_launcher(init, (), num_processes=2) if __name__ == "__main__": main()
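The comment in the script explains the failure mode it guards against: if importing `peft` eagerly initialized CUDA (via bitsandbytes), the forked `notebook_launcher` workers would crash. A quick way to assert that the import stayed lazy before launching is sketched below; this check is not part of the script itself:

```python
import torch
import peft  # importing peft must not initialize CUDA for forked workers to succeed

# Sanity check: CUDA should still be uninitialized right after the import.
assert not torch.cuda.is_initialized(), "CUDA was initialized at import time; forked subprocesses would fail"
```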
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import json import os from dataclasses import asdict, dataclass, field from typing import Dict, Optional, Union from huggingface_hub import hf_hub_download from transformers.utils import PushToHubMixin from .utils import CONFIG_NAME, PeftType, TaskType @dataclass class PeftConfigMixin(PushToHubMixin): r""" This is the base configuration class for PEFT adapter models. It contains all the methods that are common to all PEFT adapter models. This class inherits from [`~transformers.utils.PushToHubMixin`] which contains the methods to push your model to the Hub. The method `save_pretrained` will save the configuration of your adapter model in a directory. The method `from_pretrained` will load the configuration of your adapter model from a directory. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. """ peft_type: Optional[PeftType] = field(default=None, metadata={"help": "The type of PEFT model."}) auto_mapping: Optional[dict] = field( default=None, metadata={"help": "An auto mapping dict to help retrieve the base model class if needed."} ) def to_dict(self) -> Dict: r""" Returns the configuration for your adapter model as a dictionary. """ return asdict(self) def save_pretrained(self, save_directory: str, **kwargs) -> None: r""" This method saves the configuration of your adapter model in a directory. Args: save_directory (`str`): The directory where the configuration will be saved. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the [`~transformers.utils.PushToHubMixin.push_to_hub`] method. """ if os.path.isfile(save_directory): raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) auto_mapping_dict = kwargs.pop("auto_mapping_dict", None) output_dict = asdict(self) # converting set type to list for key, value in output_dict.items(): if isinstance(value, set): output_dict[key] = list(value) output_path = os.path.join(save_directory, CONFIG_NAME) # Add auto mapping details for custom models. if auto_mapping_dict is not None: output_dict["auto_mapping"] = auto_mapping_dict # save it with open(output_path, "w") as writer: writer.write(json.dumps(output_dict, indent=2, sort_keys=True)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, subfolder: Optional[str] = None, **kwargs): r""" This method loads the configuration of your adapter model from a directory. Args: pretrained_model_name_or_path (`str`): The directory or the Hub repository id where the configuration is saved. kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the child class initialization. """ # Avoid circular dependency .. 
TODO: fix this with a larger refactor from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING path = ( os.path.join(pretrained_model_name_or_path, subfolder) if subfolder is not None else pretrained_model_name_or_path ) hf_hub_download_kwargs, class_kwargs, _ = cls._split_kwargs(kwargs) if os.path.isfile(os.path.join(path, CONFIG_NAME)): config_file = os.path.join(path, CONFIG_NAME) else: try: config_file = hf_hub_download( pretrained_model_name_or_path, CONFIG_NAME, subfolder=subfolder, **hf_hub_download_kwargs ) except Exception: raise ValueError(f"Can't find '{CONFIG_NAME}' at '{pretrained_model_name_or_path}'") loaded_attributes = cls.from_json_file(config_file) # TODO: this hack is needed to fix the following issue (on commit 702f937): # if someone saves a default config and loads it back with `PeftConfig` class it yields to # not loading the correct config class. # from peft import AdaLoraConfig, PeftConfig # peft_config = AdaLoraConfig() # print(peft_config) # >>> AdaLoraConfig(peft_type=<PeftType.ADALORA: 'ADALORA'>, auto_mapping=None, base_model_name_or_path=None, # revision=None, task_type=None, inference_mode=False, r=8, target_modules=None, lora_alpha=8, lora_dropout=0.0, ... # # peft_config.save_pretrained("./test_config") # peft_config = PeftConfig.from_pretrained("./test_config") # print(peft_config) # >>> PeftConfig(peft_type='ADALORA', auto_mapping=None, base_model_name_or_path=None, revision=None, task_type=None, inference_mode=False) if "peft_type" in loaded_attributes: peft_type = loaded_attributes["peft_type"] config_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] else: config_cls = cls kwargs = {**class_kwargs, **loaded_attributes} config = config_cls(**kwargs) return config @classmethod def from_json_file(cls, path_json_file: str, **kwargs): r""" Loads a configuration file from a json file. Args: path_json_file (`str`): The path to the json file. """ with open(path_json_file, "r") as file: json_object = json.load(file) return json_object @classmethod def _split_kwargs(cls, kwargs): hf_hub_download_kwargs = {} class_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters: hf_hub_download_kwargs[key] = value elif key in list(cls.__annotations__): class_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, class_kwargs, other_kwargs @classmethod def _get_peft_type( cls, model_id: str, **hf_hub_download_kwargs, ): subfolder = hf_hub_download_kwargs.get("subfolder", None) path = os.path.join(model_id, subfolder) if subfolder is not None else model_id if os.path.isfile(os.path.join(path, CONFIG_NAME)): config_file = os.path.join(path, CONFIG_NAME) else: try: config_file = hf_hub_download( model_id, CONFIG_NAME, **hf_hub_download_kwargs, ) except Exception: raise ValueError(f"Can't find '{CONFIG_NAME}' at '{model_id}'") loaded_attributes = cls.from_json_file(config_file) return loaded_attributes["peft_type"] @property def is_prompt_learning(self) -> bool: r""" Utility method to check if the configuration is for prompt learning. """ return False @property def is_adaption_prompt(self) -> bool: """Return True if this is an adaption prompt config.""" return False @dataclass class PeftConfig(PeftConfigMixin): """ This is the base configuration class to store the configuration of a [`PeftModel`]. Args: peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use. task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform. 
inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode. """ base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name of the base model to use."} ) revision: Optional[str] = field(default=None, metadata={"help": "The specific model version to use."}) peft_type: Optional[Union[str, PeftType]] = field(default=None, metadata={"help": "Peft type"}) task_type: Optional[Union[str, TaskType]] = field(default=None, metadata={"help": "Task type"}) inference_mode: bool = field(default=False, metadata={"help": "Whether to use inference mode"}) @dataclass class PromptLearningConfig(PeftConfig): """ This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or [`PromptTuning`]. Args: num_virtual_tokens (`int`): The number of virtual tokens to use. token_dim (`int`): The hidden embedding dimension of the base transformer model. num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model. num_attention_heads (`int`): The number of attention heads in the base transformer model. num_layers (`int`): The number of layers in the base transformer model. """ num_virtual_tokens: int = field(default=None, metadata={"help": "Number of virtual tokens"}) token_dim: int = field( default=None, metadata={"help": "The hidden embedding dimension of the base transformer model"} ) num_transformer_submodules: Optional[int] = field( default=None, metadata={"help": "Number of transformer submodules"} ) num_attention_heads: Optional[int] = field(default=None, metadata={"help": "Number of attention heads"}) num_layers: Optional[int] = field(default=None, metadata={"help": "Number of transformer layers"}) @property def is_prompt_learning(self) -> bool: r""" Utility method to check if the configuration is for prompt learning. """ return True
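As the docstrings above describe, `save_pretrained` writes the config to a single JSON file (`CONFIG_NAME`) and `from_pretrained` resolves the concrete config class from the saved `peft_type`. A minimal round-trip sketch (directory name and hyperparameters are arbitrary examples):

```python
from peft import LoraConfig, PeftConfig

# Save a concrete config, then reload it through the generic PeftConfig entry point.
config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
config.save_pretrained("./my_peft_config_directory")

reloaded = PeftConfig.from_pretrained("./my_peft_config_directory")
print(type(reloaded).__name__)          # "LoraConfig" -- the saved peft_type selects the subclass
print(reloaded.r, reloaded.lora_alpha)  # 8 16
```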
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all. # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __version__ = "0.7.1.dev0" from .auto import ( AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForSequenceClassification, AutoPeftModelForSeq2SeqLM, AutoPeftModelForTokenClassification, AutoPeftModelForQuestionAnswering, AutoPeftModelForFeatureExtraction, ) from .mapping import ( MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING, get_peft_config, get_peft_model, inject_adapter_in_model, ) from .mixed_model import PeftMixedModel from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, PeftModelForQuestionAnswering, PeftModelForFeatureExtraction, ) from .tuners import ( AdaptionPromptConfig, AdaptionPromptModel, LoraConfig, LoftQConfig, LoraModel, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, IA3Config, IA3Model, AdaLoraConfig, AdaLoraModel, PrefixEncoder, PrefixTuningConfig, PromptEmbedding, PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType, PromptTuningConfig, PromptTuningInit, MultitaskPromptTuningConfig, MultitaskPromptTuningInit, OFTConfig, OFTModel, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, PeftType, TaskType, bloom_model_postprocess_past_key_value, get_peft_model_state_dict, prepare_model_for_int8_training, prepare_model_for_kbit_training, set_peft_model_state_dict, shift_tokens_right, load_peft_weights, ) from .config import PeftConfig, PromptLearningConfig
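These exports are the public entry points used throughout the examples above; the usual flow is to build a config and wrap a base model with `get_peft_model`. A short sketch (the base model and hyperparameters are placeholders):

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

# Wrap a base model with a LoRA adapter using the top-level API exported here.
base_model = AutoModelForCausalLM.from_pretrained("gpt2")  # placeholder base model
lora_config = LoraConfig(task_type=TaskType.CAUSAL_LM, r=8, lora_alpha=16, lora_dropout=0.05)
peft_model = get_peft_model(base_model, lora_config)
peft_model.print_trainable_parameters()
```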
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/peft_model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import collections import inspect import os import warnings from contextlib import contextmanager from copy import deepcopy from typing import Any, Dict, List, Optional, Union import torch from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import ModelCard, ModelCardData, hf_hub_download from safetensors.torch import save_file as safe_save_file from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from . import __version__ from .config import PeftConfig from .tuners import ( AdaLoraModel, AdaptionPromptModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MultitaskPromptEmbedding, OFTModel, PrefixEncoder, PromptEmbedding, PromptEncoder, ) from .utils import ( SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftType, TaskType, _get_batch_size, _prepare_prompt_learning_config, _set_adapter, _set_trainable, get_peft_model_state_dict, id_tensor_storage, infer_device, load_peft_weights, set_peft_model_state_dict, shift_tokens_right, ) PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.IA3: IA3Model, PeftType.OFT: OFTModel, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. adapter_name (`str`, *optional*): The name of the adapter, defaults to `"default"`. **Attributes**: - **base_model** ([`torch.nn.Module`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. 
""" def __init__(self, model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() self.modules_to_save = None self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self._is_prompt_learning = peft_config.is_prompt_learning if self._is_prompt_learning: self._peft_config = {adapter_name: peft_config} self.base_model = model self.add_adapter(adapter_name, peft_config) else: self._peft_config = None cls = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type] self.base_model = cls(model, {adapter_name: peft_config}, adapter_name) self.set_additional_trainable_modules(peft_config, adapter_name) if getattr(model, "is_gradient_checkpointing", True): model = self._prepare_model_for_gradient_checkpointing(model) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> Dict[str, PeftConfig]: if self._is_prompt_learning: return self._peft_config return self.base_model.peft_config @property def active_adapters(self) -> list[str]: try: adapters = self.base_model.active_adapters except AttributeError: adapters = self.active_adapter if isinstance(adapters, str): adapters = [adapters] return adapters @peft_config.setter def peft_config(self, value: Dict[str, PeftConfig]): if self._is_prompt_learning: self._peft_config = value else: self.base_model.peft_config = value def save_pretrained( self, save_directory: str, safe_serialization: bool = True, selected_adapters: Optional[List[str]] = None, save_embedding_layers: Union[str, bool] = "auto", is_main_process: bool = True, **kwargs: Any, ) -> None: r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`PeftModel.from_pretrained`] class method, and also used by the [`PeftModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). safe_serialization (`bool`, *optional*): Whether to save the adapter files in safetensors format, defaults to `True`. selected_adapters (`List[str]`, *optional*): A list of adapters to be saved. If `None`, will default to all adapters. save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `"auto"`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. and automatically sets the boolean flag. This only works for 🤗 transformers models. is_main_process (`bool`, *optional*): Whether the process calling this is the main process or not. Will default to `True`. Will not save the checkpoint if not on the main process, which is important for multi device setups (e.g. DDP). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") if selected_adapters is None: selected_adapters = list(self.peft_config.keys()) else: if any( selected_adapter_name not in list(self.peft_config.keys()) for selected_adapter_name in selected_adapters ): raise ValueError( f"You passed an invalid `selected_adapters` arguments, current supported adapter names are" f" {list(self.peft_config.keys())} - got {selected_adapters}." ) if is_main_process: os.makedirs(save_directory, exist_ok=True) self.create_or_update_model_card(save_directory) for adapter_name in selected_adapters: peft_config = self.peft_config[adapter_name] # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name, save_embedding_layers=save_embedding_layers, ) output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) if is_main_process and safe_serialization: # Section copied from: https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_utils.py#L2111-L2134 # Safetensors does not allow tensor aliasing. # We're going to remove aliases before saving ptrs = collections.defaultdict(list) for name, tensor in output_state_dict.items(): # Sometimes in the state_dict we have non-tensor objects. # e.g. in bitsandbytes we have some `str` objects in the state_dict if isinstance(tensor, torch.Tensor): ptrs[id_tensor_storage(tensor)].append(name) else: # In the non-tensor case, fall back to the pointer of the object itself ptrs[id(tensor)].append(name) # These are all the pointers of shared tensors. shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1} for _, names in shared_ptrs.items(): # Here we just clone the shared tensors to avoid tensor aliasing which is # not supported in safetensors. for shared_tensor_name in names[1:]: output_state_dict[shared_tensor_name] = output_state_dict[shared_tensor_name].clone() safe_save_file( output_state_dict, os.path.join(output_dir, SAFETENSORS_WEIGHTS_NAME), metadata={"format": "pt"}, ) elif is_main_process: torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if peft_config.is_prompt_learning else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True if peft_config.task_type is None: # deal with auto mapping base_model_class = self._get_base_model_class( is_prompt_tuning=peft_config.is_prompt_learning, ) parent_library = base_model_class.__module__ auto_mapping_dict = { "base_model_class": base_model_class.__name__, "parent_library": parent_library, } else: auto_mapping_dict = None if is_main_process: peft_config.save_pretrained(output_dir, auto_mapping_dict=auto_mapping_dict) peft_config.inference_mode = inference_mode @classmethod def from_pretrained( cls, model: torch.nn.Module, model_id: Union[str, os.PathLike], adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ) -> "PeftModel": r""" Instantiate a PEFT model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. 
Args: model ([`torch.nn.Module`]): The model to be adapted. For 🤗 Transformers models, the model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`]. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuation. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. """ from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), token=kwargs.get("token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model def _setup_prompt_encoder(self, adapter_name: str): config = self.peft_config[adapter_name] if not hasattr(self, "prompt_encoder"): self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if transformer_backbone is None: transformer_backbone = self.base_model if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): # for ZeRO-3, the tensor is sharded across accelerators and deepspeed modifies it to a tensor with shape [0] # the actual unsharded shape is stored in "ds_shape" attribute # special handling is needed in case the model is initialized in 
deepspeed.zero.Init() context or HfDeepSpeedConfig # has been called before # For reference refer to issue: https://github.com/huggingface/peft/issues/996 deepspeed_distributed_tensor_shape = getattr(value, "ds_shape", None) if value.shape[0] == self.base_model.config.vocab_size or ( deepspeed_distributed_tensor_shape is not None and deepspeed_distributed_tensor_shape[0] == self.base_model.config.vocab_size ): self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_encoder = MultitaskPromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") prompt_encoder = prompt_encoder.to(self.device) self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def _prepare_model_for_gradient_checkpointing(self, model: PreTrainedModel): r""" Prepares the model for gradient checkpointing if necessary """ if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def get_prompt_embedding_to_save(self, adapter_name: str) -> torch.Tensor: """ Returns the prompt embedding to save when saving the model. Only applicable when using a prompt learning method. """ prompt_encoder = self.prompt_encoder[adapter_name] prompt_tokens = ( self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(prompt_encoder.embedding.weight.device) ) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] if self.peft_config[adapter_name].peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompt_embeddings = super(MultitaskPromptEmbedding, prompt_encoder).forward(prompt_tokens) else: prompt_embeddings = prompt_encoder(prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size: int, task_ids: Optional[torch.Tensor] = None) -> torch.Tensor: """ Returns the virtual prompts to use for Peft. Only applicable when using a prompt learning method. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = ( self.prompt_tokens[self.active_adapter] .unsqueeze(0) .expand(batch_size, -1) .to(prompt_encoder.embedding.weight.device) ) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) if self.base_model_torch_dtype is not None: past_key_values = past_key_values.to(self.base_model_torch_dtype) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: prompts = prompt_encoder(prompt_tokens, task_ids) else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def get_nb_trainable_parameters(self) -> tuple[int, int]: r""" Returns the number of trainable parameters and the number of all parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self) -> None: """ Prints the number of trainable parameters in the model. """ trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || all params: {all_param:,d} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) def _get_base_model_class(self, is_prompt_tuning=False): """ Returns the base model class. """ if not is_prompt_tuning: return self.base_model.model.__class__ return self.base_model.__class__ @contextmanager def disable_adapter(self): """ Context manager that disables the adapter module. Use this to run inference on the base model. Example: ```py >>> with model.disable_adapter(): ... 
model(inputs) ``` """ try: if self.peft_config[self.active_adapter].is_prompt_learning: # TODO: consider replacing this patching of methods with a more robust mechanism: setting a flag and # letting the underyling methods deal with it, same as how LoRA does it. old_forward = self.forward self.forward = self.base_model.forward old_prepare_inputs_for_generation = self.prepare_inputs_for_generation self.prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation else: self.base_model.disable_adapter_layers() yield finally: if self.peft_config[self.active_adapter].is_prompt_learning: self.forward = old_forward self.old_prepare_inputs_for_generation = old_prepare_inputs_for_generation else: self.base_model.enable_adapter_layers() def get_base_model(self) -> torch.nn.Module: """ Returns the base model. """ return self.base_model if self.active_peft_config.is_prompt_learning else self.base_model.model def add_adapter(self, adapter_name: str, peft_config: PeftConfig) -> None: """ Add an adapter to the model based on the passed configuration. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. """ if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." ) try: if peft_config.is_prompt_learning: self.peft_config[adapter_name] = peft_config if hasattr(self.config, "to_dict"): dict_config = self.config.to_dict() else: dict_config = self.config peft_config = _prepare_prompt_learning_config(peft_config, dict_config) self._setup_prompt_encoder(adapter_name) elif peft_config.is_adaption_prompt: self.base_model.add_adapter(adapter_name, peft_config) else: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self.base_model.model, adapter_name) except Exception: # somthing went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) @classmethod def _split_kwargs(cls, kwargs: Dict[str, Any]): _kwargs_not_in_hf_hub_download_signature = ("use_auth_token",) hf_hub_download_kwargs = {} other_kwargs = {} for key, value in kwargs.items(): if key in inspect.signature(hf_hub_download).parameters or key in _kwargs_not_in_hf_hub_download_signature: hf_hub_download_kwargs[key] = value else: other_kwargs[key] = value return hf_hub_download_kwargs, other_kwargs def load_adapter(self, model_id: str, adapter_name: str, is_trainable: bool = False, **kwargs: Any): """ Load a trained adapter into the model. The name for the new adapter should be unique. The new adapter is not automatically set as the active adapter. Use [`PeftModel.set_adapter`] to set the active adapter. Args: adapter_name (`str`): The name of the adapter to be added. peft_config ([`PeftConfig`]): The configuration of the adapter to be added. 
is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and can only be used for inference. kwargs: (`optional`): Additional arguments to modify the way the adapter is loaded, e.g. the token for Hugging Face Hub. """ from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING hf_hub_download_kwargs, kwargs = self._split_kwargs(kwargs) torch_device = infer_device() if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, **hf_hub_download_kwargs, ) ].from_pretrained( model_id, **hf_hub_download_kwargs, ) if peft_config.is_prompt_learning and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) adapters_weights = load_peft_weights(model_id, device=torch_device, **hf_hub_download_kwargs) # load the weights into the model load_result = set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if self.peft_config[adapter_name].is_prompt_learning: remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default if not is_trainable: self.eval() return load_result def set_adapter(self, adapter_name: str) -> None: """ Sets the active adapter. Only one adapter can be active at a time. Args: adapter_name (`str`): The name of the adapter to be set as active. The adapter must be loaded first. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not self.peft_config[adapter_name].is_prompt_learning: self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) @property def base_model_torch_dtype(self): return getattr(self.base_model, "dtype", None) @property def active_peft_config(self): return self.peft_config[self.active_adapter] def create_or_update_model_card(self, output_dir: str): """ Updates or create model card to include information about peft: 1. Adds `peft` library tag 2. Adds peft version 3. Adds base model info 4. 
Adds quantization information if it was used """ filename = os.path.join(output_dir, "README.md") card = ModelCard.load(filename) if os.path.exists(filename) else ModelCard.from_template(ModelCardData()) card.data["library_name"] = "peft" model_config = getattr(self, "config", None) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() if model_config is not None: card.data["base_model"] = model_config["_name_or_path"] lines = card.text.splitlines() quantization_config = None if hasattr(model_config, "quantization_config"): quantization_config = self.config.quantization_config.to_dict() training_config_text = "" quantization_prefix = "The following `bitsandbytes` quantization config was used during training:" # Adds quantization information if it was used if quantization_config is not None: training_config_text += f"\n{quantization_prefix}\n" training_config_text += "\n".join([f"- {name}: {value}" for name, value in quantization_config.items()]) training_config_text += "\n" training_procedure_heading = "## Training procedure" if quantization_prefix not in lines and bool(training_config_text): if training_procedure_heading in lines: lines.insert(lines.index(training_procedure_heading) + 2, training_config_text) else: lines.append(f"{training_procedure_heading}\n{training_config_text}") # Adds peft version framework_block_heading = "### Framework versions" if f"- PEFT {__version__}" not in lines: if framework_block_heading in lines: lines.insert(lines.index(framework_block_heading) + 2, f"- PEFT {__version__}") else: lines.append(f"{framework_block_heading}\n\n- PEFT {__version__}") card.text = "\n".join(lines) card.save(filename) class PeftModelForSequenceClassification(PeftModel): """ Peft model for sequence classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForSequenceClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "SEQ_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForSequenceClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict peft_config = self.active_peft_config if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: pooled_output = self.base_model.dropout(pooled_output) logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.base_model.num_labels == 1: self.config.problem_type = "regression" elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.base_model.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForCausalLM(PeftModel): """ Peft model for causal language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. 
Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModelForCausalLM, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "CAUSAL_LM", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 1280, ... "num_transformer_submodules": 1, ... "num_attention_heads": 20, ... "num_layers": 36, ... "encoder_hidden_size": 1280, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") >>> peft_model = PeftModelForCausalLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: if self.base_model.config.model_type == "mpt": if inputs_embeds is not None: raise AssertionError("forward in MPTForCausalLM does not support inputs_embeds") return self.base_model( input_ids=input_ids, attention_mask=attention_mask, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, inputs_embeds=inputs_embeds, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # concat prompt labels if labels is not None: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def generate(self, **kwargs): self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation if hasattr(self.base_model, "model"): self.base_model.model.generation_config = self.generation_config else: self.base_model.generation_config = self.generation_config try: outputs = self.base_model.generate(**kwargs) except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation return outputs def prepare_inputs_for_generation(self, *args, task_ids: torch.Tensor = None, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if peft_config.is_prompt_learning: if model_kwargs.get("attention_mask", None) is not None: prefix_attention_mask = torch.ones( model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens ).to(model_kwargs["input_ids"].device) model_kwargs["attention_mask"] = torch.cat( (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 ) if model_kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") model_kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0], task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... 
"target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( decoder_attention_mask.device ) if peft_config.peft_type not in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, past_key_values=past_key_values, **kwargs, ) elif peft_config.peft_type in [PeftType.PROMPT_TUNING, PeftType.P_TUNING]: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) return self.base_model( inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs, ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) decoder_inputs_embeds = self.word_embeddings(decoder_input_ids) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( attention_mask.device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, attention_mask), dim=1) # concat prompt labels if labels is not None: if peft_config.num_transformer_submodules == 1: kwargs["labels"] = labels elif peft_config.num_transformer_submodules == 2: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(labels.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) if peft_config.num_transformer_submodules == 1: return self.base_model(inputs_embeds=inputs_embeds, **kwargs) elif peft_config.num_transformer_submodules == 2: decoder_inputs_embeds = torch.cat( (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1 ) return self.base_model( inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs ) def generate(self, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self._prepare_encoder_decoder_kwargs_for_generation ) try: if not peft_config.is_prompt_learning: outputs = self.base_model.generate(**kwargs) else: if "input_ids" not in kwargs: raise ValueError("input_ids must be provided for Peft model generation") if kwargs.get("position_ids", None) is not None: warnings.warn( "Position ids are not supported for parameter efficient tuning. Ignoring position ids." 
) kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None if peft_config.peft_type == PeftType.PREFIX_TUNING: outputs = self.base_model.generate(**kwargs) elif peft_config.peft_type in [ PeftType.PROMPT_TUNING, PeftType.P_TUNING, PeftType.MULTITASK_PROMPT_TUNING, ]: kwargs = deepcopy(kwargs) if "encoder_outputs" in kwargs: del kwargs["encoder_ouputs"] warnings.warn( "`encoder_outputs` should not be passed to `generate` when using prompt tuning. Ignoring it." ) input_ids = kwargs.pop("input_ids") inputs_embeds = self.word_embeddings(input_ids) batch_size = inputs_embeds.shape[0] prompts = self.get_prompt(batch_size=batch_size, task_ids=kwargs.pop("task_ids", None)) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1) kwargs["inputs_embeds"] = inputs_embeds if "attention_mask" in kwargs: prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to( kwargs["attention_mask"].device ) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) return self.base_model.generate(**kwargs) else: raise NotImplementedError except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation self.base_model._prepare_encoder_decoder_kwargs_for_generation = ( self.base_model_prepare_encoder_decoder_kwargs_for_generation ) return outputs def prepare_inputs_for_generation(self, *args, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: batch_size = model_kwargs["decoder_input_ids"].shape[0] past_key_values = self.get_prompt(batch_size) model_kwargs["past_key_values"] = past_key_values return model_kwargs class PeftModelForTokenClassification(PeftModel): """ Peft model for token classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForTokenClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "TOKEN_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForTokenClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForTokenClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig = None, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, task_ids=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size, task_ids=task_ids) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForQuestionAnswering(PeftModel): """ Peft model for extractive question answering. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForQuestionAnswering >>> from peft import PeftModelForQuestionAnswering, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "QUESTION_ANS", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForQuestionAnswering.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForQuestionAnswering(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 592900 || all params: 108312580 || trainable%: 0.5473971721475013 ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"qa_outputs"} else: self.modules_to_save.update({"qa_outputs"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config return_dict = return_dict if return_dict is not None else self.config.use_return_dict if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, start_positions=start_positions, end_positions=end_positions, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "start_positions": start_positions, "end_positions": end_positions, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.word_embeddings.weight.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = _get_batch_size(input_ids, inputs_embeds) past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(start_positions=start_positions, end_positions=end_positions, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) sequence_output = outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: sequence_output = self.base_model.dropout(sequence_output) logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class 
PeftModelForFeatureExtraction(PeftModel): """ Peft model for extracting features/embeddings from transformer models Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. Example: ```py >>> from transformers import AutoModel >>> from peft import PeftModelForFeatureExtraction, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "FEATURE_EXTRACTION", ... "inference_mode": False, ... "r": 16, ... "target_modules": ["query", "value"], ... "lora_alpha": 32, ... "lora_dropout": 0.05, ... "fan_in_fan_out": False, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModel.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForFeatureExtraction(model, peft_config) >>> peft_model.print_trainable_parameters() ``` """ def __init__(self, model: torch.nn.Module, peft_config: PeftConfig, adapter_name: str = "default"): super().__init__(model, peft_config, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not peft_config.is_prompt_learning: return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = _get_batch_size(input_ids, inputs_embeds) if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(attention_mask.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs)
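# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Complements the docstring example above with an actual forward pass through
# PeftModelForFeatureExtraction. The checkpoint "bert-base-cased" and the LoRA
# target_modules chosen here are assumptions made purely for illustration.
if __name__ == "__main__":
    from transformers import AutoModel, AutoTokenizer

    from peft import LoraConfig

    base_model = AutoModel.from_pretrained("bert-base-cased")
    lora_config = LoraConfig(
        task_type="FEATURE_EXTRACTION", r=8, lora_alpha=32, lora_dropout=0.05, target_modules=["query", "value"]
    )
    peft_model = PeftModelForFeatureExtraction(base_model, lora_config)
    peft_model.print_trainable_parameters()

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    batch = tokenizer("Parameter-efficient fine-tuning keeps most weights frozen.", return_tensors="pt")
    features = peft_model(**batch).last_hidden_state  # shape: (batch, seq_len, hidden_size)
    print(features.shape)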
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/mixed_model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import remove_hook_from_submodules from torch import nn from transformers.utils import PushToHubMixin from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES from .config import PeftConfig from .peft_model import PeftModel from .tuners import ( AdaLoraModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MixedModel, OFTModel, ) from .utils import PeftType, _set_adapter, _set_trainable PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.ADALORA: AdaLoraModel, PeftType.IA3: IA3Model, PeftType.OFT: OFTModel, } def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None: r""" Prepares the model for gradient checkpointing if necessary """ # Note: same as PeftModel._prepare_model_for_gradient_checkpointing if not getattr(model, "is_gradient_checkpointing", True): return model if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) def _check_config_compatible(peft_config: PeftConfig) -> None: if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES: raise ValueError( f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. " f"Compatible types are: {COMPATIBLE_TUNER_TYPES}" ) class PeftMixedModel(PushToHubMixin, torch.nn.Module): """ Peft model for mixing different types of adapters. This class currently does not support saving and loading. Instead, it is assumed that the adapters are already trained and loading the model requires a script to be run each time. Currently, the main purpose of mixed adapter types is to combine trained adapters for inference. Although it is technically possible to train a mixed adapter model, this has not been tested and is not recommended. Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. Below is an example that shows how to load a mixed model with two different types of adapters. ```py >>> from peft import get_peft_model >>> base_model = ... # load the base model, e.g. from transformers >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval() >>> peft_model.load_adapter(path_to_adapter2, "adapter2") >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters >>> peft_model(data) # forward pass using both adapters ``` Tips: - Not all adapter types can be combined. 
See `peft.tuners.mixed.COMPATIBLE_TUNER_TYPES` for a list of compatible types. An error will be raised if you are trying to combine incompatible adapter types. - It is possible to mix multiple adapters of the same type. This can be useful to combine adapters with very different configs. - If you want to combine a lot of different adapters, it is most performant to add the same types of adapters consecutively. E.g., add LoRA1, LoRA2, LoHa1, LoHa2 in this order, instead of LoRA1, LoHa1, LoRA2, LoHa2. As long as the adapters are commutative, the order does not matter for the final result. Args: model (`torch.nn.Module`): The model to be tuned. config (`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the first adapter. """ def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() _check_config_compatible(peft_config) _prepare_model_for_gradient_checkpointing(model) self.modules_to_save = None self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name) self.set_modules_to_save(peft_config, adapter_name) self.config = getattr(model, "config", {"model_type": "custom"}) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> dict[str, PeftConfig]: return self.base_model.peft_config @property def active_adapter(self) -> str: return self.base_model.active_adapter @property def active_adapters(self) -> list[str]: return self.base_model.active_adapters def get_nb_trainable_parameters(self): r""" Returns the number of trainable parameters and number of all parameters in the model. """ # note: same as PeftModel.get_nb_trainable_parameters trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. """ # note: same as PeftModel.print_trainable_parameters trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || " f"all params: {all_param:,d} || " f"trainable%: {100 * trainable_params / all_param:.4f}" ) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.base_model(*args, **kwargs) def generate(self, *args: Any, **kwargs: Any): """ Generate output. """ return self.base_model.generate(*args, **kwargs) @contextmanager def disable_adapter(self): """ Disables the adapter module. 
""" try: self.base_model.disable_adapter_layers() yield finally: self.base_model.enable_adapter_layers() def add_adapter(self, adapter_name: str, peft_config: PeftConfig): _check_config_compatible(peft_config) try: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self, adapter_name) except Exception: # somthing went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_modules_to_save(peft_config, adapter_name) def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None: if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None: return if self.modules_to_save is None: self.modules_to_save = set(modules_to_save) else: self.modules_to_save.update(modules_to_save) _set_trainable(self, adapter_name) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Sets the active adapter(s) for the model. Note that the order in which the adapters are applied during the forward pass may not be the same as the order in which they are passed to this function. Instead, the order during the forward pass is determined by the order in which the adapters were loaded into the model. The active adapters only determine which adapters are active during the forward pass, but not the order in which they are applied. Args: adapter_name (`str` or `List[str]`): The name of the adapter(s) to be activated. """ if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.delete_adapter(adapter_name) def merge_and_unload(self, *args: Any, **kwargs: Any): r""" This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self.base_model.merge_and_unload(*args, **kwargs) def unload(self, *args: Any, **kwargs: Any): """ Gets back the base model by removing all the adapter modules without merging. This gives back the original base model. 
""" return self.base_model.unload(*args, **kwargs) @classmethod def _split_kwargs(cls, kwargs: dict[str, Any]): return PeftModel._split_kwargs(kwargs) def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any): output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs) # TODO: not quite clear why this is necessary but tests fail without it self.set_adapter(self.active_adapters) return output def create_or_update_model_card(self, output_dir: str): raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).") def save_pretrained( self, save_directory: str, safe_serialization: bool = False, selected_adapters: Optional[list[str]] = None, **kwargs: Any, ): raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).") @classmethod def from_pretrained( cls, model: nn.Module, model_id: str | os.PathLike, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ): r""" Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. Args: model (`nn.Module`): The model to be adapted. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for inference config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuation. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. 
""" # note: adapted from PeftModel.from_pretrained from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") # note: this is different from PeftModel.from_pretrained if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING: raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: # note: should not be possible to reach, but just in case raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel model = cls(model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/import_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import importlib.metadata as importlib_metadata from functools import lru_cache import packaging.version def is_bnb_available() -> bool: return importlib.util.find_spec("bitsandbytes") is not None def is_bnb_4bit_available() -> bool: if not is_bnb_available(): return False import bitsandbytes as bnb return hasattr(bnb.nn, "Linear4bit") def is_auto_gptq_available(): if importlib.util.find_spec("auto_gptq") is not None: AUTOGPTQ_MINIMUM_VERSION = packaging.version.parse("0.5.0") version_autogptq = packaging.version.parse(importlib_metadata.version("auto_gptq")) if AUTOGPTQ_MINIMUM_VERSION <= version_autogptq: return True else: raise ImportError( f"Found an incompatible version of auto-gptq. Found version {version_autogptq}, " f"but only versions above {AUTOGPTQ_MINIMUM_VERSION} are supported" ) def is_optimum_available() -> bool: return importlib.util.find_spec("optimum") is not None @lru_cache() def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False
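# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# These helpers guard optional integrations elsewhere in the library. Running the module
# directly simply reports which optional dependencies are present in the environment.
# Note that is_auto_gptq_available() raises ImportError for an incompatible auto-gptq version.
if __name__ == "__main__":
    print(f"bitsandbytes available:       {is_bnb_available()}")
    print(f"bitsandbytes 4-bit available: {is_bnb_4bit_available()}")
    print(f"auto-gptq available:          {bool(is_auto_gptq_available())}")
    print(f"optimum available:            {is_optimum_available()}")
    print(f"torch_xla / TPU available:    {is_torch_tpu_available()}")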
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/mapping.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from typing import TYPE_CHECKING, Any, Dict import torch from .config import PeftConfig from .mixed_model import PeftMixedModel from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .tuners import ( AdaLoraConfig, AdaLoraModel, AdaptionPromptConfig, IA3Config, IA3Model, LoHaConfig, LoHaModel, LoKrConfig, LoKrModel, LoraConfig, LoraModel, MultitaskPromptTuningConfig, OFTConfig, OFTModel, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, ) from .utils import _prepare_prompt_learning_config if TYPE_CHECKING: from transformers import PreTrainedModel MODEL_TYPE_TO_PEFT_MODEL_MAPPING: Dict[str, PeftModel] = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, "QUESTION_ANS": PeftModelForQuestionAnswering, "FEATURE_EXTRACTION": PeftModelForFeatureExtraction, } PEFT_TYPE_TO_CONFIG_MAPPING: Dict[str, PeftConfig] = { "ADAPTION_PROMPT": AdaptionPromptConfig, "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "LOHA": LoHaConfig, "LOKR": LoKrConfig, "ADALORA": AdaLoraConfig, "IA3": IA3Config, "MULTITASK_PROMPT_TUNING": MultitaskPromptTuningConfig, "OFT": OFTConfig, } PEFT_TYPE_TO_TUNER_MAPPING = { "LORA": LoraModel, "LOHA": LoHaModel, "LOKR": LoKrModel, "ADALORA": AdaLoraModel, "IA3": IA3Model, "OFT": OFTModel, } def get_peft_config(config_dict: Dict[str, Any]) -> PeftConfig: """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def get_peft_model( model: PreTrainedModel, peft_config: PeftConfig, adapter_name: str = "default", mixed: bool = False ) -> PeftModel | PeftMixedModel: """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). mixed (`bool`, `optional`, defaults to `False`): Whether to allow mixing different (compatible) adapter types. 
""" model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if mixed: return PeftMixedModel(model, peft_config, adapter_name=adapter_name) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not peft_config.is_prompt_learning: return PeftModel(model, peft_config, adapter_name=adapter_name) if peft_config.is_prompt_learning: peft_config = _prepare_prompt_learning_config(peft_config, model_config) return MODEL_TYPE_TO_PEFT_MODEL_MAPPING[peft_config.task_type](model, peft_config, adapter_name=adapter_name) def inject_adapter_in_model( peft_config: PeftConfig, model: torch.nn.Module, adapter_name: str = "default" ) -> torch.nn.Module: r""" A simple API to create and inject adapter in-place into a model. Currently the API does not support prompt learning methods and adaption prompt. Make sure to have the correct `target_names` set in the `peft_config` object. The API calls `get_peft_model` under the hood but would be restricted only to non-prompt learning methods. Args: peft_config (`PeftConfig`): Configuration object containing the parameters of the Peft model. model (`torch.nn.Module`): The input model where the adapter will be injected. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the adapter to be injected, if not provided, the default adapter name is used ("default"). """ if peft_config.is_prompt_learning or peft_config.is_adaption_prompt: raise ValueError("`create_and_replace` does not support prompt learning and adaption prompt yet.") if peft_config.peft_type not in PEFT_TYPE_TO_TUNER_MAPPING.keys(): raise ValueError( f"`inject_adapter_in_model` does not support {peft_config.peft_type} yet. Please use `get_peft_model`." ) tuner_cls = PEFT_TYPE_TO_TUNER_MAPPING[peft_config.peft_type] # By instantiating a peft model we are injecting randomly initialized LoRA layers into the model's modules. peft_model = tuner_cls(model, peft_config, adapter_name=adapter_name) return peft_model.model
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/helpers.py
import inspect from copy import deepcopy from functools import update_wrapper from types import MethodType from .peft_model import PeftModel def update_forward_signature(model: PeftModel) -> None: """ Args: Updates the forward signature of the PeftModel to include parents class signature model (`PeftModel`): Peft model to update the forward signature Example: ```python >>> from transformers import WhisperForConditionalGeneration >>> from peft import get_peft_model, LoraConfig, update_forward_signature >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") >>> peft_config = LoraConfig(r=8, lora_alpha=32, lora_dropout=0.1, target_modules=["q_proj", "v_proj"]) >>> peft_model = get_peft_model(model, peft_config) >>> update_forward_signature(peft_model) ``` """ # Only update signature when the current forward signature only has *args and **kwargs current_signature = inspect.signature(model.forward) if ( len(current_signature.parameters) == 2 and "args" in current_signature.parameters and "kwargs" in current_signature.parameters ): forward = deepcopy(model.forward.__func__) update_wrapper( forward, type(model.get_base_model()).forward, assigned=("__doc__", "__name__", "__annotations__") ) model.forward = MethodType(forward, model) def update_generate_signature(model: PeftModel) -> None: """ Args: Updates the generate signature of a PeftModel with overriding generate to include parents class signature model (`PeftModel`): Peft model to update the generate signature Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_generate_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... ) >>> peft_model = get_peft_model(model, peft_config) >>> update_generate_signature(peft_model) >>> help(peft_model.generate) ``` """ if not hasattr(model, "generate"): return current_signature = inspect.signature(model.generate) if ( len(current_signature.parameters) == 2 and "args" in current_signature.parameters and "kwargs" in current_signature.parameters ) or (len(current_signature.parameters) == 1 and "kwargs" in current_signature.parameters): generate = deepcopy(model.generate.__func__) update_wrapper( generate, type(model.get_base_model()).generate, assigned=("__doc__", "__name__", "__annotations__"), ) model.generate = MethodType(generate, model) def update_signature(model: PeftModel, method: str = "all") -> None: """ Args: Updates the signature of a PeftModel include parents class signature for forward or generate method model (`PeftModel`): Peft model to update generate or forward signature method (`str`): method to update signature choose one of "forward", "generate", "all" Example: ```python >>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer >>> from peft import get_peft_model, LoraConfig, TaskType, update_signature >>> model_name_or_path = "bigscience/mt0-large" >>> tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) >>> model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) >>> peft_config = LoraConfig( ... task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1 ... 
) >>> peft_model = get_peft_model(model, peft_config) >>> update_signature(peft_model) >>> help(peft_model.generate) ``` """ if method == "forward": update_forward_signature(model) elif method == "generate": update_generate_signature(model) elif method == "all": update_forward_signature(model) update_generate_signature(model) else: raise ValueError(f"method {method} is not supported please choose one of ['forward', 'generate', 'all']")
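# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# PeftModel subclasses expose generate(**kwargs), so help() and IDE hints lose the base
# model's signature; update_signature copies it back. The checkpoint and LoRA settings
# below are assumptions for illustration.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    from peft import LoraConfig, TaskType, get_peft_model

    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    peft_model = get_peft_model(model, LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, r=8, lora_alpha=32))
    print(inspect.signature(peft_model.generate))  # (**kwargs) before the update
    update_signature(peft_model, method="all")
    print(inspect.signature(peft_model.generate))  # full generate signature afterwards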
0
hf_public_repos/peft/src
hf_public_repos/peft/src/peft/auto.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import importlib from typing import Optional from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, ) from .config import PeftConfig from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) class _BaseAutoPeftModel: _target_class = None _target_peft_class = None def __init__(self, *args, **kwargs): # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400 raise EnvironmentError( f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_config(config)` methods." ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs, ): r""" A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and the config object init. """ peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, **kwargs) base_model_path = peft_config.base_model_name_or_path task_type = getattr(peft_config, "task_type", None) if cls._target_class is not None: target_class = cls._target_class elif cls._target_class is None and task_type is not None: # this is only in the case where we use `AutoPeftModel` raise ValueError( "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. `AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)" ) if task_type is not None: expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type] if cls._target_peft_class.__name__ != expected_target_class.__name__: raise ValueError( f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__ }" " make sure that you are loading the correct model for your task type." 
) elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None: auto_mapping = getattr(peft_config, "auto_mapping", None) base_model_class = auto_mapping["base_model_class"] parent_library_name = auto_mapping["parent_library"] parent_library = importlib.import_module(parent_library_name) target_class = getattr(parent_library, base_model_class) else: raise ValueError( "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type." ) base_model = target_class.from_pretrained(base_model_path, **kwargs) return cls._target_peft_class.from_pretrained( base_model, pretrained_model_name_or_path, adapter_name=adapter_name, is_trainable=is_trainable, config=config, **kwargs, ) class AutoPeftModel(_BaseAutoPeftModel): _target_class = None _target_peft_class = PeftModel class AutoPeftModelForCausalLM(_BaseAutoPeftModel): _target_class = AutoModelForCausalLM _target_peft_class = PeftModelForCausalLM class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel): _target_class = AutoModelForSeq2SeqLM _target_peft_class = PeftModelForSeq2SeqLM class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel): _target_class = AutoModelForSequenceClassification _target_peft_class = PeftModelForSequenceClassification class AutoPeftModelForTokenClassification(_BaseAutoPeftModel): _target_class = AutoModelForTokenClassification _target_peft_class = PeftModelForTokenClassification class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel): _target_class = AutoModelForQuestionAnswering _target_peft_class = PeftModelForQuestionAnswering class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel): _target_class = AutoModel _target_peft_class = PeftModelForFeatureExtraction
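As a usage note for the Auto classes defined above, a hedged sketch follows; the adapter repository id is an illustrative assumption, and any checkpoint containing an `adapter_config.json` with a `base_model_name_or_path` entry works the same way.

```python
# Hedged sketch: AutoPeftModelForCausalLM reads the PeftConfig from the adapter
# checkpoint, loads the matching base model via AutoModelForCausalLM, then wraps
# it in PeftModelForCausalLM. The repo id below is an illustrative assumption.
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained(
    "ybelkada/opt-350m-lora",   # adapter checkpoint (assumed)
    adapter_name="default",
    is_trainable=False,         # keep the adapter frozen for inference
)
```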
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/loftq_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py # Reference paper: https://arxiv.org/abs/2310.08659 import logging from typing import Union import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available if is_bnb_available(): import bitsandbytes as bnb class NFQuantizer: def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs): super().__init__(*args, **kwargs) self.num_bits = num_bits self.device = device self.method = method self.block_size = block_size if self.method == "normal": self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) elif self.method == "uniform": self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) else: raise NotImplementedError("Other quantization methods not supported yet.") @staticmethod def create_uniform_map(symmetric=False, num_bits=4): if symmetric: # print("symmetric uniform quantization") negative = torch.linspace(-1, 0, 2 ** (num_bits - 1)) positive = torch.linspace(0, 1, 2 ** (num_bits - 1)) table = torch.cat([negative, positive[1:]]) else: # print("asymmetric uniform quantization") table = torch.linspace(-1, 1, 2**num_bits) return table @staticmethod def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2): try: from scipy.stats import norm except ImportError: raise ImportError("The required package 'scipy' is not installed. 
Please install it to continue.") variations = 2**num_bits if symmetric: v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist() values = [] for index in range(len(v) - 1): values.append(0.5 * v[index] + 0.5 * v[index + 1]) v = values else: # one more positive value, this is an asymmetric type v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist() v2 = [0] v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist() v = v1 + v2 + v3 values = torch.Tensor(v) values = values.sort().values values /= values.max() return values def quantize_tensor(self, weight): max_abs = torch.abs(weight).max() weight_normed = weight / max_abs weight_normed_expanded = weight_normed.unsqueeze(-1) # Reshape L to have the same number of dimensions as X_expanded L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1) # Calculate the absolute difference between X_expanded and L_reshaped abs_diff = torch.abs(weight_normed_expanded - L_reshaped) # Find the index of the minimum absolute difference for each element qweight = torch.argmin(abs_diff, dim=-1) return qweight, max_abs def dequantize_tensor(self, qweight, max_abs): qweight_flatten = qweight.flatten() weight_normed = self.norm_lookup_table[qweight_flatten] weight = weight_normed * max_abs weight = weight.reshape(qweight.shape) return weight def quantize_block(self, weight): if len(weight.shape) != 2: raise ValueError(f"Only support 2D matrix, but your input has {len(weight.shape)} dimensions.") if weight.shape[0] * weight.shape[1] % self.block_size != 0: raise ValueError( f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) " f"is not dividable by block size {self.block_size}." ) M, N = weight.shape device = weight.device # Quantization weight_flatten = weight.flatten() # (M*N, ) weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B if self.method == "normal": weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1) elif self.method == "uniform": weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1) else: raise NotImplementedError("Method not supported yet.") weight_max = weight_max.unsqueeze(-1) weight_divabs = weight_block / weight_max # (L, B) weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1) L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K) abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K) qweight = torch.argmin(abs_diff, dim=-1) # (L, B) # Pack multiple k-bit into uint8 qweight = qweight.reshape(-1, 8 // self.num_bits) qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device) # data format example: # [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO for i in range(8 // self.num_bits): qweight[:, i] = qweight[:, i] << i * self.num_bits qweight_pack[:, 0] |= qweight[:, i] return qweight_pack, weight_max, weight.shape def dequantize_block(self, qweight, weight_max, weight_shape): # unpack weight device = qweight.device weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device) for i in range(8 // self.num_bits): lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits # get the most right 2 bits lookup_table_idx = lookup_table_idx.to(torch.int) weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze() qweight = qweight >> self.num_bits # right shift 2 bits of the original data weight_block = weight.reshape(-1, self.block_size) weight = weight_block * weight_max weight = weight.reshape(weight_shape) return weight def 
_low_rank_decomposition(weight, reduced_rank=32): """ :param weight: The matrix to decompose, of shape (H, W) :param reduced_rank: the final rank :return: """ matrix_dimension = len(weight.size()) if matrix_dimension != 2: raise ValueError(f"Only support 2D matrix, but your input has {matrix_dimension} dimensions.") # Use SVD to decompose a matrix, default full_matrices is False to save parameters U, S, Vh = torch.linalg.svd(weight, full_matrices=False) L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank])) R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank} @torch.no_grad() def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1): if num_bits not in [2, 4, 8]: raise ValueError("Only support 2, 4, 8 bits quantization") if num_iter <= 0: raise ValueError("Number of iterations must be greater than 0") out_feature, in_feature = weight.size() device = weight.device dtype = weight.dtype logging.info( f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} " f"| Num Iter: {num_iter} | Num Bits: {num_bits}" ) if not is_bnb_4bit_available(): quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64) weight = weight.to(torch.float32) res = weight.clone() for i in range(num_iter): torch.cuda.empty_cache() # Quantization if num_bits == 4 and is_bnb_4bit_available(): qweight = bnb.nn.Params4bit( res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4" ).to(device) dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state) else: quantized_weight, max_abs, shape = quantizer.quantize_block(res) dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape) res = weight - dequantized_weight # Decompose the residual by SVD output = _low_rank_decomposition(res, reduced_rank=reduced_rank) L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"] res = weight - torch.mm(L, R) lora_A, lora_B = R, L return dequantized_weight.to(dtype), lora_A, lora_B
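To make the flow above concrete, here is a hedged sketch of calling `loftq_init` directly on a weight matrix (inside PEFT this is normally triggered through LoftQ-enabled LoRA initialization rather than called by hand). The shape, rank and bit-width are arbitrary assumptions; the 4-bit path needs `bitsandbytes` on a CUDA device, while the fallback quantizer needs `scipy`.

```python
# Hedged sketch: split a dense weight into a quantized backbone plus a low-rank
# (LoRA) correction of the quantization residual.
import torch

from peft.utils.loftq_utils import loftq_init

weight = torch.randn(768, 768)  # illustrative weight; move to "cuda" for the bitsandbytes path

dequantized, lora_A, lora_B = loftq_init(weight, num_bits=4, reduced_rank=16, num_iter=1)

# lora_B @ lora_A approximates the residual (weight - dequantized), so the sum
# reconstructs the original weight up to quantization error.
approx = dequantized + lora_B @ lora_A
print((weight - approx).abs().mean())
```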
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/constants.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch # needed for prefix-tuning of bloom model def bloom_model_postprocess_past_key_value(past_key_values): past_key_values = torch.cat(past_key_values) total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape keys = past_key_values[: total_layers // 2] keys = keys.transpose(2, 3).reshape( total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens ) values = past_key_values[total_layers // 2 :] values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim) return tuple(zip(keys, values)) # needed for prefix-tuning of StarCoder models def starcoder_model_postprocess_past_key_value(past_key_values): result = [] for k in past_key_values: k = k[:, :, 0] k = k.permute([1, 2, 0, 3]) k = k.reshape(*k.shape[:-2], -1) result.append(k) return tuple(result) TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = { "bloom": bloom_model_postprocess_past_key_value, "gpt_bigcode": starcoder_model_postprocess_past_key_value, } TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["c_attn"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "btlm": ["c_proj", "c_attn"], "codegen": ["qkv_proj"], "mistral": ["q_proj", "v_proj"], "stablelm": ["q_proj", "v_proj"], "phi": ["Wqkv", "out_proj", "fc1", "fc2"], } TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING = { "t5": ["k", "v", "wo"], "mt5": ["k", "v", "wi_1"], "gpt2": ["c_attn", "mlp.c_proj"], "bloom": ["query_key_value", "mlp.dense_4h_to_h"], "roberta": ["key", "value", "output.dense"], "opt": ["q_proj", "k_proj", "fc2"], "gptj": ["q_proj", "v_proj", "fc_out"], "gpt_neox": ["query_key_value", "dense_4h_to_h"], "gpt_neo": ["q_proj", "v_proj", "c_proj"], "bart": ["q_proj", "v_proj", "fc2"], "gpt_bigcode": ["c_attn", "mlp.c_proj"], "llama": ["k_proj", "v_proj", "down_proj"], "bert": ["key", "value", "output.dense"], "deberta-v2": ["key_proj", "value_proj", "output.dense"], "deberta": ["in_proj", "output.dense"], "RefinedWebModel": ["query_key_value", "dense_4h_to_h"], "RefinedWeb": ["query_key_value", "dense_4h_to_h"], "falcon": ["query_key_value", "dense_4h_to_h"], } TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING = { "t5": ["wo"], "mt5": [], "gpt2": ["mlp.c_proj"], "bloom": 
["mlp.dense_4h_to_h"], "roberta": ["output.dense"], "opt": ["fc2"], "gptj": ["fc_out"], "gpt_neox": ["dense_4h_to_h"], "gpt_neo": ["c_proj"], "bart": ["fc2"], "gpt_bigcode": ["mlp.c_proj"], "llama": ["down_proj"], "bert": ["output.dense"], "deberta-v2": ["output.dense"], "deberta": ["output.dense"], "RefinedWeb": ["dense_4h_to_h"], "RefinedWebModel": ["dense_4h_to_h"], "falcon": ["dense_4h_to_h"], } TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = { "t5": ["q", "k", "v", "o", "wi", "wo"], "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"], "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "llama": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "key", "value", "dense"], # "xlm-roberta": ["query", "value"], # "electra": ["query", "value"], "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"], "gpt_bigcode": ["c_attn"], "deberta": ["in_proj"], # "layoutlm": ["query", "value"], } COMMON_LAYERS_PATTERN = ["layers", "h", "block", "blocks", "layer"] WEIGHTS_NAME = "adapter_model.bin" SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors" CONFIG_NAME = "adapter_config.json" EMBEDDING_LAYER_NAMES = ["embed_tokens", "lm_head"]
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType from .peft_types import PeftType, TaskType from .other import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, COMMON_LAYERS_PATTERN, CONFIG_NAME, WEIGHTS_NAME, SAFETENSORS_WEIGHTS_NAME, _set_trainable, bloom_model_postprocess_past_key_value, prepare_model_for_int8_training, prepare_model_for_kbit_training, shift_tokens_right, transpose, _get_batch_size, _get_submodules, _set_adapter, _freeze_adapter, ModulesToSaveWrapper, _prepare_prompt_learning_config, _is_valid_match, infer_device, get_auto_gptq_quant_linear, get_quantization_config, id_tensor_storage, ) from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict, load_peft_weights
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/other.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import warnings from typing import Optional, Tuple import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from safetensors.torch import storage_ptr, storage_size from ..import_utils import is_auto_gptq_available, is_torch_tpu_available from .constants import ( COMMON_LAYERS_PATTERN, CONFIG_NAME, EMBEDDING_LAYER_NAMES, SAFETENSORS_WEIGHTS_NAME, TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, bloom_model_postprocess_past_key_value, starcoder_model_postprocess_past_key_value, ) __all__ = [ "COMMON_LAYERS_PATTERN", "CONFIG_NAME", "EMBEDDING_LAYER_NAMES", "SAFETENSORS_WEIGHTS_NAME", "TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING", "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "WEIGHTS_NAME", "bloom_model_postprocess_past_key_value", "starcoder_model_postprocess_past_key_value", ] # Get current device name based on available devices def infer_device(): if torch.cuda.is_available(): torch_device = "cuda" elif is_xpu_available(): torch_device = "xpu" elif is_npu_available(): torch_device = "npu" else: torch_device = "cpu" return torch_device def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True, gradient_checkpointing_kwargs=None): r""" Note this method only works for `transformers` models. This method wraps the entire protocol for preparing a model before running a training. This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model (`transformers.PreTrainedModel`): The loaded model from `transformers` use_gradient_checkpointing (`bool`, *optional*, defaults to `True`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. gradient_checkpointing_kwargs (`dict`, *optional*, defaults to `None`): Keyword arguments to pass to the gradient checkpointing function, please refer to the documentation of `torch.utils.checkpoint.checkpoint` for more details about the arguments that you can pass to that method. Note this is only available in the latest transformers versions (> 4.34.1). 
""" loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq" if gradient_checkpointing_kwargs is None: gradient_checkpointing_kwargs = {} for name, param in model.named_parameters(): # freeze base model's layers param.requires_grad = False if not is_gptq_quantized: # cast all non INT8 parameters to fp32 for param in model.parameters(): if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if (loaded_in_kbit or is_gptq_quantized) and use_gradient_checkpointing: # When having `use_reentrant=False` + gradient_checkpointing, there is no need for this hack if "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # To support older transformers versions, check if the model supports gradient_checkpointing_kwargs _supports_gc_kwargs = "gradient_checkpointing_kwargs" in list( inspect.signature(model.gradient_checkpointing_enable).parameters ) if not _supports_gc_kwargs and len(gradient_checkpointing_kwargs) > 0: warnings.warn( "gradient_checkpointing_kwargs is not supported in this version of transformers. The passed kwargs will be ignored." " if you want to use that feature, please upgrade to the latest version of transformers.", FutureWarning, ) gc_enable_kwargs = ( {} if not _supports_gc_kwargs else {"gradient_checkpointing_kwargs": gradient_checkpointing_kwargs} ) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable(**gc_enable_kwargs) return model # For backward compatibility def prepare_model_for_int8_training(*args, **kwargs): warnings.warn( "prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.", FutureWarning, ) return prepare_model_for_kbit_training(*args, **kwargs) # copied from transformers.models.bart.modeling_bart def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids pad_token_id (`int`): The id of the `padding` token. decoder_start_token_id (`int`): The id of the `start` token. 
""" shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class ModulesToSaveWrapper(torch.nn.Module): def __init__(self, module_to_save, adapter_name): super().__init__() self.original_module = module_to_save self.modules_to_save = torch.nn.ModuleDict({}) self._active_adapter = adapter_name self._disable_adapters = False self.update(adapter_name) @property def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters @property def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter def update(self, adapter_name): self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): old_hook = self.modules_to_save[adapter_name]._hf_hook new_hook = self._create_new_hook(old_hook) remove_hook_from_module(self.modules_to_save[adapter_name]) add_hook_to_module(self.modules_to_save[adapter_name], new_hook) self.original_module.requires_grad_(False) if adapter_name == self.active_adapter: self.modules_to_save[adapter_name].requires_grad_(True) def _create_new_hook(self, old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! """ old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def forward(self, *args, **kwargs): if self.disable_adapters or (self.active_adapter not in self.modules_to_save): return self.original_module(*args, **kwargs) return self.modules_to_save[self.active_adapter](*args, **kwargs) def enable_adapters(self, enabled: bool): """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. 
Args: enabled (bool): True to enable adapters, False to disable adapters """ if self._disable_adapters is not enabled: # already in the desired state, do nothing return if enabled: self.original_module.requires_grad_(False) self.modules_to_save[self.active_adapter].requires_grad_(True) self._disable_adapters = False else: self.original_module.requires_grad_(True) self.modules_to_save.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_name: str): """Set the active adapter Args: adapter_name (str): The name of the adapter to set as active """ if adapter_name not in self.modules_to_save: raise ValueError(f"Adapter {adapter_name} not found in {self.modules_to_save.keys()}") self.modules_to_save[self.active_adapter].requires_grad_(False) self.modules_to_save[adapter_name].requires_grad_(True) self._active_adapter = adapter_name def _get_submodules(model, key): parent = model.get_submodule(".".join(key.split(".")[:-1])) target_name = key.split(".")[-1] target = model.get_submodule(key) return parent, target, target_name def _freeze_adapter(model, adapter_name): for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False def _set_trainable(model, adapter_name): key_list = [key for key, _ in model.named_modules()] for key in key_list: target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save) if target_module_found: parent, target, target_name = _get_submodules(model, key) if isinstance(target, ModulesToSaveWrapper): target.update(adapter_name) target.set_adapter(target.active_adapter) else: new_module = ModulesToSaveWrapper(target, adapter_name) new_module.set_adapter(adapter_name) setattr(parent, target_name, new_module) def _set_adapter(model, adapter_name): def check_adapter_name(adapter_name): if isinstance(adapter_name, str): return adapter_name # adapter_name is a list of str if len(adapter_name) > 1: raise ValueError("Only one adapter can be set at a time for modules_to_save") elif len(adapter_name) == 0: raise ValueError("Please specify at least one adapter to set") adapter_name = adapter_name[0] return adapter_name for module in model.modules(): if isinstance(module, ModulesToSaveWrapper): # only check the adapter_name if we actually encounter a ModulesToSaveWrapper, otherwise we don't care adapter_name = check_adapter_name(adapter_name) module.set_adapter(adapter_name) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads 
= model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", peft_config.token_dim) return peft_config def fsdp_auto_wrap_policy(model): import functools import os from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder def lambda_policy_fn(module): if ( len(list(module.named_children())) == 0 and getattr(module, "weight", None) is not None and module.weight.requires_grad ): return True return False lambda_policy = functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn) transformer_wrap_policy = functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=( PrefixEncoder, PromptEncoder, PromptEmbedding, FullyShardedDataParallelPlugin.get_module_class_from_name( model, os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "") ), ), ) auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy]) return auto_wrap_policy def transpose(weight, fan_in_fan_out): if not fan_in_fan_out: return weight if isinstance(weight, torch.nn.Parameter): return torch.nn.Parameter(weight.T) return weight.T def _is_valid_match(key: str, target_key: str): """ Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key or the target_key is a submodule of key """ if key.endswith(target_key): if len(key) > len(target_key): return key.endswith("." + target_key) # must be a sub module return True return False def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int: """Get the batch size based on either input_ids or input_embeds Raises an ValueError if both are None. 
""" if (input_ids is None) and (inputs_embeds is None): raise ValueError("You have to provide either input_ids or inputs_embeds") if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] return batch_size def get_quantization_config(model: torch.nn.Module, method: str): """ Get the quantization config of the related quantization method """ if ( hasattr(model, "config") and hasattr(model.config, "quantization_config") and (getattr(model, "quantization_method", None) == method) ): return model.config.quantization_config return None def get_auto_gptq_quant_linear(gptq_quantization_config): """ Get the right AutoGPTQQuantLinear class based on the quantization config file """ if gptq_quantization_config is not None and is_auto_gptq_available(): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear desc_act = gptq_quantization_config.desc_act group_size = gptq_quantization_config.group_size bits = gptq_quantization_config.bits if hasattr(gptq_quantization_config, "use_exllama"): use_exllama = gptq_quantization_config.use_exllama else: use_exllama = not gptq_quantization_config.disable_exllama if hasattr(gptq_quantization_config, "exllama_config"): exllama_version = gptq_quantization_config.exllama_config["version"] else: exllama_version = 1 AutoGPTQQuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=desc_act, group_size=group_size, bits=bits, disable_exllama=not (use_exllama and exllama_version == 1), disable_exllamav2=not (use_exllama and exllama_version == 2), ) return AutoGPTQQuantLinear return None def id_tensor_storage(tensor: torch.Tensor) -> Tuple[torch.device, int, int]: """ Unique identifier to a tensor storage. Multiple different tensors can share the same underlying storage. For example, "meta" tensors all share the same storage, and thus their identifier will all be equal. This identifier is guaranteed to be unique and constant for this tensor's storage during its lifetime. Two tensor storages with non-overlapping lifetimes may have the same id. This method is the exact same copy of https://github.com/huggingface/transformers/blob/main/src/transformers/pytorch_utils.py#L282C1-L300C58 but we added it here manually to avoid import issue with old versions of transformers. """ if tensor.device.type == "xla" and is_torch_tpu_available(): # NOTE: xla tensors dont have storage # use some other unique id to distinguish. # this is a XLA tensor, it must be created using torch_xla's # device. So the following import is safe: import torch_xla unique_id = torch_xla._XLAC._xla_get_tensor_id(tensor) else: unique_id = storage_ptr(tensor) return tensor.device, unique_id, storage_size(tensor)
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/peft_types.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum class PeftType(str, enum.Enum): """Enum class for the different types of adapters in PEFT.""" PROMPT_TUNING = "PROMPT_TUNING" MULTITASK_PROMPT_TUNING = "MULTITASK_PROMPT_TUNING" P_TUNING = "P_TUNING" PREFIX_TUNING = "PREFIX_TUNING" LORA = "LORA" ADALORA = "ADALORA" ADAPTION_PROMPT = "ADAPTION_PROMPT" IA3 = "IA3" LOHA = "LOHA" LOKR = "LOKR" OFT = "OFT" class TaskType(str, enum.Enum): """Enum class for the different types of tasks supported by PEFT.""" SEQ_CLS = "SEQ_CLS" SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM" CAUSAL_LM = "CAUSAL_LM" TOKEN_CLS = "TOKEN_CLS" QUESTION_ANS = "QUESTION_ANS" FEATURE_EXTRACTION = "FEATURE_EXTRACTION"
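Because both enums above inherit from `(str, enum.Enum)`, their members compare equal to plain strings, so configs accept either form; a tiny hedged illustration:

```python
# Hedged illustration: enum members behave like their string values.
from peft.utils import PeftType, TaskType

assert PeftType.LORA == "LORA"
assert TaskType.CAUSAL_LM == "CAUSAL_LM"
# so e.g. task_type=TaskType.CAUSAL_LM and task_type="CAUSAL_LM" are interchangeable.
```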
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/save_and_load.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import warnings from typing import Optional import torch from huggingface_hub import file_exists, hf_hub_download from huggingface_hub.utils import EntryNotFoundError from safetensors.torch import load_file as safe_load_file from .other import EMBEDDING_LAYER_NAMES, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, infer_device from .peft_types import PeftType def has_valid_embedding_base_layer(layer): """Check if the layer has an embedding base layer""" return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding)) def get_embedding_layer_name(model, layer, is_prompt_learning): """Get the name of the embedding module for a given layer.""" for name, module in model.named_modules(): if (is_prompt_learning and module == layer) or module == layer.base_layer: return name return None def get_peft_model_state_dict( model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto" ): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the passed model will be used. adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter whose state dict should be returned. unwrap_compiled (`bool`, *optional*, defaults to `False`): Whether to unwrap the model if torch.compile was used. save_embedding_layers (`Union[bool, str]`, , *optional*, defaults to `auto`): If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. Based on it sets the boolean flag. This only works for 🤗 transformers models. 
""" if unwrap_compiled: model = getattr(model, "_orig_mod", model) config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict() if config.peft_type in (PeftType.LORA, PeftType.ADALORA): # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} for k in state_dict: if "lora_" in k: to_return[k] = state_dict[k] bias_name = k.split("lora_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} config.rank_pattern = rank_pattern to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) elif config.peft_type == PeftType.LOHA: to_return = {k: state_dict[k] for k in state_dict if "hada_" in k} elif config.peft_type == PeftType.LOKR: to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k} elif config.peft_type == PeftType.ADAPTION_PROMPT: to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} elif config.is_prompt_learning: to_return = {} if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: if config.inference_mode: prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) to_return["prompt_embeddings"] = prompt_embeddings elif config.peft_type == PeftType.IA3: to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k} elif config.peft_type == PeftType.OFT: to_return = {k: state_dict[k] for k in state_dict if "oft_" in k} else: raise NotImplementedError if getattr(model, "modules_to_save", None) is not None: for key, value in state_dict.items(): if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save): to_return[key.replace("modules_to_save.", "")] = value # check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary if ( save_embedding_layers == "auto" and hasattr(config, "target_modules") and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES) ): warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.") save_embedding_layers = True elif save_embedding_layers == "auto": save_embedding_layers = False if save_embedding_layers and hasattr(model, "get_input_embeddings"): for layer in [model.get_input_embeddings(), model.get_output_embeddings()]: if config.is_prompt_learning or has_valid_embedding_base_layer(layer): # support from version >= 0.6.2 embedding_module_name = get_embedding_layer_name(model, layer, 
config.is_prompt_learning) if embedding_module_name: to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k}) elif save_embedding_layers: warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.") to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()} return to_return def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"): """ Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. """ config = model.peft_config[adapter_name] state_dict = {} if getattr(model, "modules_to_save", None) is not None: for key, value in peft_model_state_dict.items(): if any(module_name in key for module_name in model.modules_to_save): for module_name in model.modules_to_save: if module_name in key: key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}") break state_dict[key] = value else: state_dict = peft_model_state_dict if config.peft_type in (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.IA3, PeftType.OFT): peft_model_state_dict = {} parameter_prefix = { PeftType.IA3: "ia3_", PeftType.LORA: "lora_", PeftType.ADALORA: "lora_", PeftType.LOHA: "hada_", PeftType.LOKR: "lokr_", PeftType.OFT: "oft_", }[config.peft_type] for k, v in state_dict.items(): if parameter_prefix in k: suffix = k.split(parameter_prefix)[1] if "." in suffix: suffix_to_replace = ".".join(suffix.split(".")[1:]) k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") else: k = f"{k}.{adapter_name}" peft_model_state_dict[k] = v else: peft_model_state_dict[k] = v if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: peft_model_state_dict = state_dict else: raise NotImplementedError load_result = model.load_state_dict(peft_model_state_dict, strict=False) if config.is_prompt_learning: model.prompt_encoder[adapter_name].embedding.load_state_dict( {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True ) if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING: model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False) return load_result def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. 
""" path = ( os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if hf_hub_download_kwargs.get("subfolder", None) is not None else model_id ) if device is None: device = infer_device() if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False else: token = hf_hub_download_kwargs.get("token", None) if token is None: token = hf_hub_download_kwargs.get("use_auth_token", None) has_remote_safetensors_file = file_exists( repo_id=model_id, filename=SAFETENSORS_WEIGHTS_NAME, revision=hf_hub_download_kwargs.get("revision", None), repo_type=hf_hub_download_kwargs.get("repo_type", None), token=token, ) use_safetensors = has_remote_safetensors_file if has_remote_safetensors_file: # Priority 1: load safetensors weights filename = hf_hub_download( model_id, SAFETENSORS_WEIGHTS_NAME, **hf_hub_download_kwargs, ) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) except EntryNotFoundError: raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." ) if use_safetensors: adapters_weights = safe_load_file(filename, device=device) else: adapters_weights = torch.load(filename, map_location=torch.device(device)) return adapters_weights
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/__init__.py
# flake8: noqa # There's no way to ignore "F401 '...' imported but unused" warnings in this # module, but to preserve other warnings. So, don't check this module at all # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .adaption_prompt import AdaptionPromptConfig, AdaptionPromptModel from .lora import LoraConfig, LoraModel, LoftQConfig from .loha import LoHaConfig, LoHaModel from .lokr import LoKrConfig, LoKrModel from .ia3 import IA3Config, IA3Model from .adalora import AdaLoraConfig, AdaLoraModel from .p_tuning import PromptEncoder, PromptEncoderConfig, PromptEncoderReparameterizationType from .prefix_tuning import PrefixEncoder, PrefixTuningConfig from .prompt_tuning import PromptEmbedding, PromptTuningConfig, PromptTuningInit from .multitask_prompt_tuning import MultitaskPromptEmbedding, MultitaskPromptTuningConfig, MultitaskPromptTuningInit from .oft import OFTConfig, OFTModel from .mixed import MixedModel
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/lycoris_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from abc import abstractmethod from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Set, Type, Union import torch import torch.nn as nn from tqdm import tqdm from peft.config import PeftConfig from peft.utils import ( ModulesToSaveWrapper, _get_submodules, ) from .tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists @dataclass class LycorisConfig(PeftConfig): r""" A base config for LyCORIS like adapters """ rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) class LycorisLayer(BaseTunerLayer): r""" A base layer for LyCORIS like adapters """ # adapter_layer_names needs to be defined on the child class other_param_names = ("r", "alpha", "scaling", "rank_dropout", "module_dropout") def __init__(self, base_layer: nn.Module) -> None: self.base_layer = base_layer self.r = {} self.alpha = {} self.scaling = {} self.rank_dropout = {} self.module_dropout = {} # Tuner info self._disable_adapters = False self.merged_adapters = [] @property @abstractmethod def _available_adapters(self) -> Set[str]: ... def _init_empty_weights(self, cls, *args, **kwargs) -> None: # A helper method that allows to initialize the layer of the given class without spending time to initialize the # model weights. The implementation is inspired by # https://pytorch.org/docs/stable/generated/torch.nn.utils.skip_init.html but this function cannot be used # directly. # Instead of this approach, it would be possible to bypass the __init__ of the class but that runs the risk of # omitting important logic inside that __init__. kwargs = kwargs.copy() final_device = kwargs.pop("device", "cpu") cls.__init__(self, *args, device="meta", **kwargs) self.to_empty(device=final_device) @abstractmethod def create_adapter_parameters(self, adapter_name: str, r: int, **kwargs): ... # TODO: refactor LoRA to use the same approach @abstractmethod def _get_delta_activations(self, adapter_name: str, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: """Activations added on top of the base layer output (i.e. after the base layer forward pass)""" @abstractmethod def get_delta_weight(self, adapter_name: str) -> torch.Tensor: ... 
def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If `None`, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self._available_adapters: base_layer = self.get_base_layer() if safe_merge: orig_weights = base_layer.weight.data orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) @abstractmethod def reset_adapter_parameters(self, adapter_name: str): ... def set_scale(self, adapter, scale): if adapter not in self._available_adapters: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale * self.alpha[adapter] / self.r[adapter] def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue self.scaling[active_adapter] *= scale def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self._available_adapters: self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue if scale is None: self.scaling[active_adapter] = self.alpha[active_adapter] / self.r[active_adapter] else: self.scaling[active_adapter] /= scale @abstractmethod def update_layer(self, adapter_name: str, r: int, alpha: float, **kwargs): ... class LycorisTuner(BaseTuner): r""" A base tuner for LyCORIS like adapters """ prefix: str layers_mapping: Dict[Type[torch.nn.Module], Type[LycorisLayer]] def __init__(self, model, config, adapter_name): super().__init__(model, config, adapter_name) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) @staticmethod def _check_target_module_exists(config, key): return check_target_module_exists(config, key) @abstractmethod def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LycorisLayer, nn.Module], target_name, parent, current_key, **optional_kwargs, ): ... 
@classmethod def _create_new_module(cls, config: LycorisConfig, adapter_name: str, target: nn.Module, **kwargs) -> LycorisLayer: # Find corresponding subtype of provided target module new_module_cls = None for subtype, target_cls in cls.layers_mapping.items(): if ( hasattr(target, "base_layer") and isinstance(target.get_base_layer(), subtype) and isinstance(target, BaseTunerLayer) ): # nested tuner layers are allowed new_module_cls = target_cls break elif isinstance(target, subtype): new_module_cls = target_cls break # We didn't find corresponding type, so adapter for this layer is not supported if new_module_cls is None: supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) raise ValueError( f"Target module of type {type(target)} not supported, " f"currently only adapters for {supported_modules} are supported" ) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Conv2d): new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): new_module = new_module_cls(target, adapter_name=adapter_name, **kwargs) else: supported_modules = ", ".join(layer.__name__ for layer in cls.layers_mapping.keys()) raise ValueError( f"Target module of type {type(target)} not supported, " f"currently only adapters for {supported_modules} are supported" ) return new_module def _mark_only_adapters_as_trainable(self) -> None: for n, p in self.model.named_parameters(): if self.prefix not in n: p.requires_grad = False @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: raise ValueError("Please specify `target_modules` in `peft_config`") return peft_config def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if self.prefix in name: module.to(child.weight.device) def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def _unload_and_optionally_merge( self, merge: bool = True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge LOHA layers when the model is gptq quantized") self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # 
save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. """ self._set_adapter_layers(enabled=False) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None ) -> torch.nn.Module: r""" This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, LycorisLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (`str`): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LycorisLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or []
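The `LycorisTuner` entry points above (`set_adapter`, `merge_and_unload`, `unload`, `delete_adapter`) are normally reached through one of its concrete subclasses. A minimal sketch, assuming a toy `nn.Sequential` and the `LoHaConfig`/`LoHaModel` classes defined later in this dump; the target module names `"0"` and `"2"` are simply the children of the toy model and are illustrative only.

```py
import torch.nn as nn

from peft import LoHaConfig
from peft.tuners.loha import LoHaModel

# Toy base model; its Linear children are named "0" and "2" by nn.Sequential.
base = nn.Sequential(nn.Linear(16, 16), nn.ReLU(), nn.Linear(16, 4))
config = LoHaConfig(r=4, alpha=4, target_modules=["0", "2"])

# BaseTuner.inject_adapter (tuners_utils.py below) swaps the targeted Linear
# layers for LoHa adapter layers when the tuner is constructed.
model = LoHaModel(base, config, "default")

model.set_adapter("default")        # activate the adapter on every LycorisLayer
merged = model.merge_and_unload()   # fold the deltas back into plain torch modules
```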
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/tuners_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging import re import warnings from abc import ABC, abstractmethod from typing import Any, List, Optional, Union import torch from torch import nn from peft.utils import COMMON_LAYERS_PATTERN from ..config import PeftConfig from ..utils import ModulesToSaveWrapper, _get_submodules logger = logging.getLogger(__name__) class BaseTuner(nn.Module, ABC): r""" A base tuner model that provides the common methods and attributes for all tuners that are injectable into a torch.nn.Module For adding a new Tuner class, one needs to overwrite the following methods: - **_prepare_adapter_config**: A private method to eventually prepare the adapter config, for example in case the field `target_modules` is missing. - **_check_target_module_exists**: A helper private method to check if the passed module's key name matches any of the target modules in the adatper_config. - **_create_and_replace**: A private method to create and replace the target module with the adapter module. - **_check_target_module_exists**: A private helper method to check if the passed module's key name matches any of the target modules in the adatper_config. The easiest is to check what is done in the `peft.tuners.lora.LoraModel` class. Attributes: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. forward (`Callable`): The forward method of the model. peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`): The adapter configuration object, it should be a dictionary of `str` to `PeftConfig` objects. One can also pass a PeftConfig object and a new adapter will be created with the default name `adapter` or create a new dictionary with a key `adapter_name` and a value of that peft config. config (`dict[str, Any]`): The model configuration object, it should be a dictionary of `str` to `Any` objects. """ def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None: super().__init__() self.model = model # For advanced developpers, if you want to attach multiple adapters to your # model, just add a `peft_config` dict attribute to your model. if not hasattr(self, "peft_config"): self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config else: logger.info( "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters" " in the model. Make sure to know what you are doing!" ) if isinstance(peft_config, PeftConfig): self.peft_config[adapter_name] = peft_config else: # user is adding a dict of PeftConfigs self.peft_config.update(peft_config) self.active_adapter = adapter_name self.inject_adapter(self.model, adapter_name) # Copy the peft_config in the injected model. 
self.model.peft_config = self.peft_config @property def active_adapters(self) -> list[str]: if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def forward(self, *args: Any, **kwargs: Any): return self.model.forward(*args, **kwargs) @abstractmethod def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: r""" A private method to eventually prepare the adapter config. For transformers based models, if `peft_config.target_modules` is None, we can automatically infer the target modules from the `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to automatically infer it for all tuner models. Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. Args: peft_config (`str`): The adapter config. model_config (`str`): The transformers model config, that config should contain the `model_type` key. """ ... @abstractmethod def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: r""" A helper private method to check if the passed module's key name matches any of the target modules in the `peft_config.target_modules` list. If it does, return `True`, else return `False`. Args: peft_config (`PeftConfig`): The adapter config. key (`str`): The module's key name. """ ... @abstractmethod def _create_and_replace( self, peft_config: PeftConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, **optional_kwargs: Any, ) -> None: r""" Inplace replacement of the target module with the adapter layer. This method needs to be overriden by all the tuner classes. Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. Args: peft_config (`PeftConfig`): The adapter config. adapter_name (`str`): The adapter name. target (`nn.Module`): The target module. target_name (`str`): The target module's name. parent (`nn.Module`): The parent module. **optional_kwargs (`dict`): The optional keyword arguments to pass to deal with particular cases (e.g. 8bit, 4bit quantization) """ ... @abstractmethod def _mark_only_adapters_as_trainable(self): r""" A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to be overriden for all tuner classes to match the correct key names. Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. """ ... def _check_new_adapter_config(self, config: PeftConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ pass def inject_adapter(self, model: nn.Module, adapter_name: str): r""" Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. Args: model (`nn.Module`): The model to be tuned. adapter_name (`str`): The adapter name. """ peft_config = self.peft_config[adapter_name] # Note: If possible, all checks should be performed *at the start of this method*. # This way, we can raise early if something goes wrong, without leaving the model # in a bad (half-initialized) state. 
self._check_new_adapter_config(peft_config) is_target_modules_in_base_model = False key_list = [key for key, _ in model.named_modules()] _check_for_modules_to_save = getattr(peft_config, "modules_to_save", None) is not None _has_modules_to_save = False model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config = self._prepare_adapter_config(peft_config, model_config) for key in key_list: # Check for modules_to_save in case if _check_for_modules_to_save and any( key.endswith(f"{module_to_save}") for module_to_save in peft_config.modules_to_save ): # Optionally set the modules to save parent, target, target_name = _get_submodules(model, key) if not isinstance(target, ModulesToSaveWrapper): new_module = ModulesToSaveWrapper(target, adapter_name) setattr(parent, target_name, new_module) else: target.update(adapter_name) _has_modules_to_save = True continue if not self._check_target_module_exists(peft_config, key): continue is_target_modules_in_base_model = True parent, target, target_name = _get_submodules(model, key) optional_kwargs = { "loaded_in_8bit": getattr(model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(model, "is_loaded_in_4bit", False), "current_key": key, } self._create_and_replace(peft_config, adapter_name, target, target_name, parent, **optional_kwargs) if not is_target_modules_in_base_model: raise ValueError( f"Target modules {peft_config.target_modules} not found in the base model. " f"Please check the target modules and try again." ) self._mark_only_adapters_as_trainable() if self.peft_config[adapter_name].inference_mode: for n, p in self.model.named_parameters(): if adapter_name in n: p.requires_grad = False if _has_modules_to_save: if not hasattr(model, "modules_to_save"): model.modules_to_save = set(peft_config.modules_to_save) else: model.modules_to_save.update(set(peft_config.modules_to_save)) def merge_adapter(self, adapter_names: Optional[list[str]] = None) -> None: """ This method merges the adapter layers into the base model. Merging adapters can lead to a speed up of the forward pass. A copy of the adapter weights is still kept in memory, which is required to unmerge the adapters. In order to merge the adapter weights without keeping them in memory, please call `merge_and_unload`. Args: safe_merge (`bool`, *optional*): If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If `None`, all active adapters will be merged. Defaults to `None`. """ for module in self.model.modules(): if isinstance(module, BaseTunerLayer): module.merge(adapter_names=adapter_names) def unmerge_adapter(self): """ This method unmerges all merged adapter layers from the base model. 
""" for module in self.model.modules(): if isinstance(module, BaseTunerLayer): module.unmerge() def _unloading_checks(self, adapter_names: Optional[List[str]]): adapters_to_consider = adapter_names or self.active_adapters is_modules_to_save_available = any( self.peft_config[adapter].modules_to_save for adapter in adapters_to_consider ) if is_modules_to_save_available and len(adapters_to_consider) > 1: raise ValueError("Cannot unload multiple adapters that specify `modules_to_save`.") class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_plugable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapters (Union[List[`str`], `str`], *optional*): The name of the active adapter. """ active_adapter = None # All names of layers that may contain adapter (trainable) weights adapter_layer_names: tuple[str] = () # All names of other parameters that may contain adapter-related parameters other_param_names: tuple[str] = () # indicates whether all adapters should be disabled _disable_adapters: bool = False # the currently active adapter(s) _active_adapter: str | list[str] = "default" # List all merged adapters merged_adapters: list[str] = [] def get_base_layer(self) -> nn.Module: """ (Recursively) get the base_layer. This is necessary for the case that the tuner layer wraps another tuner layer. """ base_layer = self while hasattr(base_layer, "base_layer"): base_layer = base_layer.base_layer return base_layer @property def weight(self) -> torch.Tensor: # This is required for some transformers code, e.g. for T5, weight is accessed as: # self.wo.weight # where "wo" is the adapter layer. # https://github.com/huggingface/transformers/blob/78f6ed6c70b29c1560780e3869a7ad4c6b3d2710/src/transformers # /models/t5/modeling_t5.py#L292 base_layer = self.get_base_layer() if hasattr(base_layer, "qweight"): # QuantLinear weight = base_layer.qweight else: # Other layers weight = base_layer.weight return weight def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: raise NotImplementedError def unmerge(self) -> None: raise NotImplementedError @property def merged(self) -> bool: return bool(self.merged_adapters) @property def disable_adapters(self) -> bool: # use a property to ensure that disable_adapters is not set directly, instead use the enable_adapters method return self._disable_adapters @property def active_adapter(self) -> str: # use a property to ensure that active_adapter is not set directly, instead use the set_adapter method return self._active_adapter @property def active_adapters(self): if isinstance(self.active_adapter, str): return [self.active_adapter] # is already a list of str return self.active_adapter def enable_adapters(self, enabled: bool) -> None: """Toggle the enabling and disabling of adapters Takes care of setting the requires_grad flag for the adapter weights. Args: enabled (bool): True to enable adapters, False to disable adapters """ if enabled: self.set_adapter(self.active_adapters) self._disable_adapters = False else: # disable grads on all adapter layers for layer_name in self.adapter_layer_names: layer = getattr(self, layer_name) layer.requires_grad_(False) self._disable_adapters = True def set_adapter(self, adapter_names: str | list[str]) -> None: """Set the active adapter(s). Args: adapter_name (`str` or `List[str]`): Name of the adapter(s) to be activated. 
""" if isinstance(adapter_names, str): adapter_names = [adapter_names] # Deactivate grads on the inactive adapter and activate grads on the active adapter for layer_name in self.adapter_layer_names: module_dict = getattr(self, layer_name) for key, layer in module_dict.items(): if key in adapter_names: # Note: It is possible that not a single layer is called with requires_grad_(True) here. This may # happen if a completely different adapter layer is being activated. layer.requires_grad_(True) else: layer.requires_grad_(False) self._active_adapter = adapter_names def _all_available_adapter_names(self) -> list[str]: """Return a sorted list of all available adapter names""" adapter_names = set() for name in self.adapter_layer_names + self.other_param_names: # we check each possible attribute and if it's a dict or ModuleDict, we assume that the keys are the adapter # names attr = getattr(self, name) if hasattr(attr, "keys"): adapter_names.update(attr.keys()) return sorted(adapter_names) def delete_adapter(self, adapter_name: str) -> None: """ Delete an adapter from the layer This should be called on all adapter layers, or else we will get an inconsistent state. This method will also set a new active adapter if the deleted adapter was an active adapter. It is important that the new adapter is chosen in a deterministic way, so that the same adapter is chosen on all layers. Args: adapter_name (`str`): The name of the adapter to delete """ for attr in self.adapter_layer_names + self.other_param_names: if adapter_name in getattr(self, attr): del getattr(self, attr)[adapter_name] if adapter_name in self.active_adapters: # choose a new active adapter active_adapters = self.active_adapters[:] active_adapters.remove(adapter_name) if active_adapters: self.set_adapter(active_adapters) else: # no active adapters left, set a new default adapter # here we get the list of all adapters existing adapter names and choose the first one remaining_adapters = self._all_available_adapter_names() if not remaining_adapters: self.set_adapter([]) else: new_active_adapter = remaining_adapters[0] warnings.warn( f"Adapter {adapter_name} was active which is now deleted. Setting active adapter to " f"{new_active_adapter}." ) self.set_adapter(remaining_adapters[0]) def check_target_module_exists(config, key: str) -> bool | re.Match[str] | None: """A helper method to check if the passed module's key name matches any of the target modules in the adapter_config. 
Args: config (`LoraConfig` | `LycorisConfig`): A config to match target modules from key (`str`): A key to search any matches in config Returns: `bool` | `re.Match[str]` | `None`: True or a match object if key matches any target modules from config, False or None if no match found """ if isinstance(config.target_modules, str): target_module_found = re.fullmatch(config.target_modules, key) else: target_module_found = key in config.target_modules or any( key.endswith(f".{target_key}") for target_key in config.target_modules ) is_using_layer_indexes = getattr(config, "layers_to_transform", None) is not None layer_indexing_pattern = getattr(config, "layers_pattern", None) if is_using_layer_indexes and target_module_found: layers_pattern = COMMON_LAYERS_PATTERN if layer_indexing_pattern is None else layer_indexing_pattern layers_pattern = [layers_pattern] if isinstance(layers_pattern, str) else layers_pattern for pattern in layers_pattern: layer_index = re.match(rf".*.{pattern}\.(\d+)\.*", key) if layer_index is not None: layer_index = int(layer_index.group(1)) if isinstance(config.layers_to_transform, int): target_module_found = layer_index == config.layers_to_transform else: target_module_found = layer_index in config.layers_to_transform break else: target_module_found = False return target_module_found def inspect_matched_modules(tuner: BaseTuner, adapter_name: str = "default") -> dict: """ A helper function to inspect the set of matched and unmatched modules for a PEFT model and the given adapter. """ config = tuner.peft_config[adapter_name] key_list = [key for key, _ in tuner.model.named_modules()] module_dict = {"matched": [], "unmatched": []} for key in key_list: if tuner._check_target_module_exists(config, key): module_dict["matched"].append(key) else: module_dict["unmatched"].append(key) return module_dict
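A small sketch of how `check_target_module_exists` resolves suffix-style and regex-style `target_modules`. A `LoraConfig` is used here purely for illustration; any config exposing `target_modules` (and optionally `layers_to_transform`/`layers_pattern`) is matched the same way, and the module keys are placeholders.

```py
from peft import LoraConfig
from peft.tuners.tuners_utils import check_target_module_exists

suffix_cfg = LoraConfig(target_modules=["q_proj", "v_proj"])
regex_cfg = LoraConfig(target_modules=".*decoder.*(q_proj|v_proj)$")

# Suffix matching: the key must equal a target or end with ".<target>".
print(check_target_module_exists(suffix_cfg, "model.layers.0.self_attn.q_proj"))        # True
print(check_target_module_exists(suffix_cfg, "model.layers.0.self_attn.k_proj"))        # False

# Regex matching: re.fullmatch against the whole key.
print(bool(check_target_module_exists(regex_cfg, "decoder.block.0.self_attn.q_proj")))  # True
```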
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prompt_tuning/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Optional, Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptTuningInit(str, enum.Enum): TEXT = "TEXT" RANDOM = "RANDOM" @dataclass class PromptTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEmbedding`]. Args: prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. prompt_tuning_init_text (`str`, *optional*): The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_name_or_path (`str`, *optional*): The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_kwargs (`dict`, *optional*): The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if `prompt_tuning_init` is `TEXT`. """ prompt_tuning_init: Union[PromptTuningInit, str] = field( default=PromptTuningInit.RANDOM, metadata={"help": "How to initialize the prompt tuning parameters"}, ) prompt_tuning_init_text: Optional[str] = field( default=None, metadata={ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_kwargs: Optional[dict] = field( default=None, metadata={ "help": ( "The keyword arguments to pass to `AutoTokenizer.from_pretrained`. Only used if prompt_tuning_init is " "`TEXT`" ), }, ) def __post_init__(self): self.peft_type = PeftType.PROMPT_TUNING if self.tokenizer_kwargs and (self.prompt_tuning_init != PromptTuningInit.TEXT): raise ValueError( f"tokenizer_kwargs only valid when using prompt_tuning_init='{PromptTuningInit.TEXT.value}'." )
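An illustrative `PromptTuningConfig` (not part of the module): TEXT initialization requires an init text and a tokenizer; the task type, init text, and tokenizer name here are placeholders.

```py
from peft import PromptTuningConfig, PromptTuningInit

config = PromptTuningConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=8,
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="Classify the sentiment of this review:",
    tokenizer_name_or_path="gpt2",
)
# __post_init__ above raises if tokenizer_kwargs is passed without TEXT init.
```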
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prompt_tuning/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import PromptTuningConfig, PromptTuningInit from .model import PromptEmbedding __all__ = ["PromptTuningConfig", "PromptEmbedding", "PromptTuningInit"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prompt_tuning/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch from .config import PromptTuningInit class PromptEmbedding(torch.nn.Module): """ The model to encode virtual tokens into prompt embeddings. Args: config ([`PromptTuningConfig`]): The configuration of the prompt embedding. word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model. **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding. Example: ```py >>> from peft import PromptEmbedding, PromptTuningConfig >>> config = PromptTuningConfig( ... peft_type="PROMPT_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... prompt_tuning_init="TEXT", ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral", ... tokenizer_name_or_path="t5-base", ... ) >>> # t5_model.shared is the word embeddings of the base model >>> prompt_embedding = PromptEmbedding(config, t5_model.shared) ``` Input Shape: (`batch_size`, `total_virtual_tokens`) Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config, word_embeddings): super().__init__() total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim) if config.prompt_tuning_init == PromptTuningInit.TEXT: from transformers import AutoTokenizer tokenizer_kwargs = config.tokenizer_kwargs or {} tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs) init_text = config.prompt_tuning_init_text init_token_ids = tokenizer(init_text)["input_ids"] # Trim or iterate until num_text_tokens matches total_virtual_tokens num_text_tokens = len(init_token_ids) if num_text_tokens > total_virtual_tokens: init_token_ids = init_token_ids[:total_virtual_tokens] elif num_text_tokens < total_virtual_tokens: num_reps = math.ceil(total_virtual_tokens / num_text_tokens) init_token_ids = init_token_ids * num_reps init_token_ids = init_token_ids[:total_virtual_tokens] init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device) word_embedding_weights = word_embeddings(init_token_ids).detach().clone() word_embedding_weights = word_embedding_weights.to(torch.float32) self.embedding.weight = torch.nn.Parameter(word_embedding_weights) def forward(self, indices): # Just get embeddings prompt_embeddings = self.embedding(indices) return prompt_embeddings
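A standalone sketch of the input/output shapes described in the docstring, using RANDOM initialization so no tokenizer or base model is needed; the vocabulary size and `token_dim` are assumed values.

```py
import torch

from peft import PromptTuningConfig
from peft.tuners.prompt_tuning import PromptEmbedding

config = PromptTuningConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,
    prompt_tuning_init="RANDOM",  # avoids the tokenizer path in __init__
)
# Stand-in for the base model's word embeddings (e.g. model.get_input_embeddings()).
word_embeddings = torch.nn.Embedding(50257, 768)
prompt_embedding = PromptEmbedding(config, word_embeddings)

indices = torch.arange(config.num_virtual_tokens).unsqueeze(0)  # (batch_size, total_virtual_tokens)
print(prompt_embedding(indices).shape)                          # torch.Size([1, 20, 768])
```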
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional, Union from peft.tuners.lycoris_utils import LycorisConfig from peft.utils import PeftType @dataclass class LoHaConfig(LycorisConfig): """ This is the configuration class to store the configuration of a [`LoHaModel`]. Args: r (`int`): LoHa rank. alpha (`int`): The alpha parameter for LoHa scaling. rank_dropout (`int`): The dropout probability for rank dimension during training. module_dropout (`int`): The dropout probability for disabling LoHa modules during training. use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper). target_modules (`Union[List[str],str]`): The names of the modules to apply LoHa to. init_weights (`bool`): Whether to perform initialization of LoHa weights. layers_to_transform (`Union[List[int],int]`): The layer indexes to transform, if this argument is specified, it will apply the LoHa transformations on the layer indexes that are specified in this list. If a single integer is passed, it will apply the LoHa transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer pattern is not in the common layers pattern. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. modules_to_save (`List[str]`): The names of modules to be set as trainable except LoHa parameters. """ r: int = field(default=8, metadata={"help": "LoHa rank"}) alpha: int = field(default=8, metadata={"help": "LoHa alpha"}) rank_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for rank dimension during training"} ) module_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for disabling LoHa modules during training"} ) use_effective_conv2d: bool = field( default=False, metadata={ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)' }, ) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with LoHa." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the LoHa layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." 
), }, ) layers_to_transform: Optional[Union[List[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, if this argument is specified, PEFT will transform only the layer indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[str] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different from None and if the layer pattern is not in the common layers pattern." }, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from LoHA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` is randomly initialized and as such needs to be trainable and saved." }, ) def __post_init__(self): self.peft_type = PeftType.LOHA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules )
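An illustrative `LoHaConfig` (not part of the module); the target module and classifier names are placeholders for a typical transformer block.

```py
from peft import LoHaConfig

config = LoHaConfig(
    r=8,
    alpha=16,
    target_modules=["q_proj", "v_proj"],
    rank_dropout=0.0,
    module_dropout=0.0,
    modules_to_save=["classifier"],
)
# __post_init__ sets the PEFT type and normalizes the target module list to a set.
print(config.peft_type, sorted(config.target_modules))  # PeftType.LOHA ['q_proj', 'v_proj']
```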
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import LoHaConfig from .layer import Conv2d, Linear, LoHaLayer from .model import LoHaModel __all__ = ["LoHaConfig", "LoHaModel", "Conv2d", "Linear", "LoHaLayer"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from itertools import chain from typing import Dict, Type, Union import torch from torch import nn from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner from .layer import Conv2d, Linear, LoHaLayer class LoHaModel(LycorisTuner): """ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in https://arxiv.org/abs/2108.06098 Current implementation heavily borrows from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`LoHaConfig`]): The configuration of the LoHa model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The LoHa model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import LoHaModel, LoHaConfig >>> config_te = LoHaConfig( ... r=8, ... lora_alpha=32, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = LoHaConfig( ... r=8, ... lora_alpha=32, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... use_effective_conv2d=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default") >>> model.unet = LoHaModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model. """ prefix: str = "hada_" layers_mapping: Dict[Type[torch.nn.Module], Type[LoHaLayer]] = { torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear, } def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LoHaLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str, **optional_kwargs, ) -> None: """ A private method to create and replace the target module with the adapter module. """ # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys())) target_name_key = next(filter(lambda key: re.match(f"(.*\.)?{key}$", current_key), pattern_keys), target_name) kwargs = config.to_dict() kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha) if isinstance(target, LoHaLayer): target.update_layer(adapter_name, **kwargs) else: new_module = self._create_new_module(config, adapter_name, target, **kwargs) self._replace_module(parent, target_name, new_module, target)
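The `rank_pattern`/`alpha_pattern` lookup inside `_create_and_replace` can be illustrated standalone; the sketch below re-implements that single step with plain dicts and assumed layer names, mirroring the regex used above.

```py
import re
from itertools import chain

# Assumed per-layer overrides and an assumed module key; default_r stands in for config.r.
rank_pattern = {"ff.net.0.proj": 16}
alpha_pattern = {}
default_r = 8
current_key = "unet.down_blocks.0.attentions.0.transformer_blocks.0.ff.net.0.proj"
target_name = "proj"

pattern_keys = list(chain(rank_pattern.keys(), alpha_pattern.keys()))
target_name_key = next(
    (key for key in pattern_keys if re.match(rf"(.*\.)?{key}$", current_key)),
    target_name,
)
r = rank_pattern.get(target_name_key, default_r)
print(target_name_key, r)  # ff.net.0.proj 16
```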
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/loha/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any, Set, Tuple import torch import torch.nn as nn import torch.nn.functional as F from peft.tuners.lycoris_utils import LycorisLayer class LoHaLayer(nn.Module, LycorisLayer): # All names of layers that may contain adapter weights adapter_layer_names = ("hada_w1_a", "hada_w1_b", "hada_w2_a", "hada_w2_b", "hada_t1", "hada_t2") # other_param_names is defined on parent class def __init__(self, base_layer: nn.Module): super().__init__() LycorisLayer.__init__(self, base_layer) # LoHa info self.hada_w1_a = nn.ParameterDict({}) self.hada_w1_b = nn.ParameterDict({}) self.hada_w2_a = nn.ParameterDict({}) self.hada_w2_b = nn.ParameterDict({}) self.hada_t1 = nn.ParameterDict({}) self.hada_t2 = nn.ParameterDict({}) @property def _available_adapters(self) -> Set[str]: return {*self.hada_w1_a, *self.hada_w1_b, *self.hada_w2_a, *self.hada_w2_b, *self.hada_t1, *self.hada_t2} def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...]): # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L130C9-L143C75 if len(shape) == 4: self.hada_t1[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode self.hada_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0])) # out_dim, 1-mode self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) # in_dim , 2-mode else: self.hada_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r)) self.hada_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) self.hada_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0], r)) self.hada_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1])) def reset_adapter_parameters(self, adapter_name: str): # Original implementation performs initialization with normal distribution # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158 # FedPara paper proposes to perform He initialization, let's stick with it # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization if adapter_name in self.hada_w1_a.keys(): nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5)) nn.init.zeros_(self.hada_w2_b[adapter_name]) if adapter_name in self.hada_t1.keys(): nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5)) def reset_adapter_parameters_random(self, adapter_name: str): # Original implementation performs 
initialization with normal distribution # https://github.com/KohakuBlueleaf/LyCORIS/blob/3549fdef8f564761d68b695a08ef88b1122fdedc/lycoris/modules/loha.py#L158 # FedPara paper proposes to perform He initialization, let's stick with it # It is enough to initialize only single matrix with zeros to make adapter do nothing after initialization if adapter_name in self.hada_w1_a.keys(): nn.init.kaiming_uniform_(self.hada_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w1_b[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w2_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_w2_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.hada_t1.keys(): nn.init.kaiming_uniform_(self.hada_t1[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.hada_t2[adapter_name], a=math.sqrt(5)) def update_layer( self, adapter_name: str, r: int, alpha: float, rank_dropout: float, module_dropout: float, init_weights: bool, use_effective_conv2d: bool = False, **kwargs, ) -> None: """Internal function to create loha adapter Args: adapter_name (`str`): Name for the adapter to add. r (`int`): Rank for the added adapter. alpha (`float`): Alpha for the added adapter. rank_dropout (`float`): The dropout probability for rank dimension during training. module_dropout (`float`): The dropout probability for disabling adapter during training. init_weights (`bool`): Whether to initialize weights. use_effective_conv2d (`bool`, *optional*, defaults to `False`): Use parameter effective decomposition for Conv2d with ksize > 1. """ self.r[adapter_name] = r self.alpha[adapter_name] = alpha self.scaling[adapter_name] = alpha / r self.rank_dropout[adapter_name] = rank_dropout self.module_dropout[adapter_name] = module_dropout # Determine shape of LoHa weights base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): shape = tuple(base_layer.weight.shape) elif isinstance(base_layer, nn.Conv2d): use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1) if use_effective_conv2d: shape = (base_layer.out_channels, base_layer.in_channels, *base_layer.kernel_size) else: shape = ( base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], ) else: raise TypeError(f"LoHa is not implemented for base layers of type {type(base_layer).__name__}") # Create weights with provided shape self.create_adapter_parameters(adapter_name, r, shape) # Initialize weights if init_weights: self.reset_adapter_parameters(adapter_name) else: self.reset_adapter_parameters_random(adapter_name) # Move new weights to device weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def get_delta_weight(self, adapter_name: str) -> torch.Tensor: # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L178 if adapter_name in self.hada_t1.keys(): weight = make_weight_cp( self.hada_t1[adapter_name], self.hada_w1_a[adapter_name], self.hada_w1_b[adapter_name], self.hada_t2[adapter_name], self.hada_w2_a[adapter_name], self.hada_w2_b[adapter_name], scale=torch.tensor(self.scaling[adapter_name]), ) else: weight = make_weight( self.hada_w1_a[adapter_name], self.hada_w1_b[adapter_name], self.hada_w2_a[adapter_name], 
self.hada_w2_b[adapter_name], scale=torch.tensor(self.scaling[adapter_name]), ) base_layer = self.get_base_layer() weight = weight.reshape(base_layer.weight.shape) # Perform rank dropout during training - drop rows of addition weights rank_dropout = self.rank_dropout[adapter_name] if self.training and rank_dropout: drop = (torch.rand(weight.size(0)) > rank_dropout).to(weight.dtype) drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device) # TODO: Investigate if there should be a scaler like in normal dropout during training # Original implementation doesn't have it # https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L193 drop /= drop.mean() weight *= drop return weight def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # Execute all the adapters for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue module_dropout = self.module_dropout[active_adapter] # Modify current execution weights if (not self.training) or (self.training and torch.rand(1) > module_dropout): result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs) result = result.to(previous_dtype) return result class Linear(LoHaLayer): """LoHa implemented in Linear layer""" def __init__( self, base_layer: nn.Module, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) # don't add bias here, because the bias is already included in the output of the base_layer return F.linear(input, delta_weight) def __repr__(self) -> str: rep = super().__repr__() return "loha." + rep class Conv2d(LoHaLayer): """LoHa implemented in Conv2d layer""" def __init__( self, base_layer: nn.Module, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, use_effective_conv2d: bool = False, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer( adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs ) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) # don't add bias here, because the bias is already included in the output of the base_layer base_layer = self.get_base_layer() return F.conv2d( input, delta_weight, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups, ) def __repr__(self) -> str: rep = super().__repr__() return "loha." 
+ rep # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py#L9 class HadaWeight(torch.autograd.Function): @staticmethod def forward(ctx, w1a, w1b, w2a, w2b, scale=torch.tensor(1)): ctx.save_for_backward(w1a, w1b, w2a, w2b, scale) diff_weight = ((w1a @ w1b) * (w2a @ w2b)) * scale return diff_weight @staticmethod def backward(ctx, grad_out): (w1a, w1b, w2a, w2b, scale) = ctx.saved_tensors grad_out = grad_out * scale temp = grad_out * (w2a @ w2b) grad_w1a = temp @ w1b.T grad_w1b = w1a.T @ temp temp = grad_out * (w1a @ w1b) grad_w2a = temp @ w2b.T grad_w2b = w2a.T @ temp del temp return grad_w1a, grad_w1b, grad_w2a, grad_w2b, None class HadaWeightCP(torch.autograd.Function): @staticmethod def forward(ctx, t1, w1a, w1b, t2, w2a, w2b, scale=torch.tensor(1)): ctx.save_for_backward(t1, w1a, w1b, t2, w2a, w2b, scale) rebuild1 = torch.einsum("i j k l, j r, i p -> p r k l", t1, w1b, w1a) rebuild2 = torch.einsum("i j k l, j r, i p -> p r k l", t2, w2b, w2a) return rebuild1 * rebuild2 * scale @staticmethod def backward(ctx, grad_out): (t1, w1a, w1b, t2, w2a, w2b, scale) = ctx.saved_tensors grad_out = grad_out * scale temp = torch.einsum("i j k l, j r -> i r k l", t2, w2b) rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w2a) grad_w = rebuild * grad_out del rebuild grad_w1a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w1a.T) del grad_w, temp grad_w1b = torch.einsum("i r k l, i j k l -> r j", t1, grad_temp) grad_t1 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w1b.T) del grad_temp temp = torch.einsum("i j k l, j r -> i r k l", t1, w1b) rebuild = torch.einsum("i j k l, i r -> r j k l", temp, w1a) grad_w = rebuild * grad_out del rebuild grad_w2a = torch.einsum("r j k l, i j k l -> r i", temp, grad_w) grad_temp = torch.einsum("i j k l, i r -> r j k l", grad_w, w2a.T) del grad_w, temp grad_w2b = torch.einsum("i r k l, i j k l -> r j", t2, grad_temp) grad_t2 = torch.einsum("i j k l, j r -> i r k l", grad_temp, w2b.T) del grad_temp return grad_t1, grad_w1a, grad_w1b, grad_t2, grad_w2a, grad_w2b, None def make_weight(w1a, w1b, w2a, w2b, scale): return HadaWeight.apply(w1a, w1b, w2a, w2b, scale) def make_weight_cp(t1, w1a, w1b, t2, w2a, w2b, scale): return HadaWeightCP.apply(t1, w1a, w1b, t2, w2a, w2b, scale)
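A numerical sanity check of the Hadamard-product construction above: `make_weight` builds the delta weight as the elementwise product of two low-rank factorizations, scaled. The shapes and the scale below are assumed values (in the layer, the scale is `alpha / r`).

```py
import torch

from peft.tuners.loha.layer import make_weight

out_dim, in_dim, r = 6, 5, 2
w1a, w1b = torch.randn(out_dim, r), torch.randn(r, in_dim)
w2a, w2b = torch.randn(out_dim, r), torch.randn(r, in_dim)
scale = torch.tensor(2.0)

delta = make_weight(w1a, w1b, w2a, w2b, scale)
expected = (w1a @ w1b) * (w2a @ w2b) * scale
assert torch.allclose(delta, expected)
print(delta.shape)  # torch.Size([6, 5])
```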
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lokr/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional, Union from peft.tuners.lycoris_utils import LycorisConfig from peft.utils import PeftType @dataclass class LoKrConfig(LycorisConfig): """ Configuration class of [`LoKrModel`]. Args: r (`int`): LoKr rank. alpha (`int`): The alpha parameter for LoKr scaling. rank_dropout (`int`): The dropout probability for rank dimension during training. module_dropout (`int`): The dropout probability for disabling LoKr modules during training. use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1 ("Proposition 3" from FedPara paper). decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix. decompose_factor (`int`): Kronecker product decomposition factor. target_modules (`Union[List[str],str]`): The names of the modules to apply LoKr to. init_weights (`bool`): Whether to perform initialization of LoKr weights. layers_to_transform (`Union[List[int],int]`): The layer indexes to transform, if this argument is specified, it will apply the LoKr transformations on the layer indexes that are specified in this list. If a single integer is passed, it will apply the LoKr transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer pattern is not in the common layers pattern. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `alpha`. modules_to_save (`List[str]`): The names of modules to be set as trainable except LoKr parameters. """ r: int = field(default=8, metadata={"help": "LoKr rank"}) alpha: int = field(default=8, metadata={"help": "LoKr alpha"}) rank_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for rank dimension during training"} ) module_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for disabling LoKr modules during training"} ) use_effective_conv2d: bool = field( default=False, metadata={ "help": 'Use parameter effective decomposition for Conv2d 3x3 with ksize > 1 ("Proposition 3" from FedPara paper)' }, ) decompose_both: bool = field( default=False, metadata={"help": "Perform rank decomposition of left kronecker product matrix."}, ) decompose_factor: int = field(default=-1, metadata={"help": "Kronecker product decomposition factor."}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with LoKr." 
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the LoKr layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) layers_to_transform: Optional[Union[List[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[str] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." }, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from LoKr layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) def __post_init__(self): self.peft_type = PeftType.LOKR
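An illustrative `LoKrConfig` (not part of the module); the target module names are placeholders for a diffusion-style attention block.

```py
from peft.tuners.lokr import LoKrConfig

config = LoKrConfig(
    r=8,
    alpha=16,
    target_modules=["to_q", "to_v"],
    decompose_both=False,
    decompose_factor=-1,   # -1 lets the Kronecker factorization pick the split
    use_effective_conv2d=True,
)
print(config.peft_type)  # PeftType.LOKR
```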
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lokr/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import LoKrConfig from .layer import Conv2d, Linear, LoKrLayer from .model import LoKrModel __all__ = ["LoKrConfig", "LoKrModel", "Conv2d", "Linear", "LoKrLayer"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lokr/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from itertools import chain from typing import Dict, Type, Union import torch from torch import nn from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner from .layer import Conv2d, Linear, LoKrLayer class LoKrModel(LycorisTuner): """ Creates Low-Rank Kronecker Product model from a pretrained model. The original method is partially described in https://arxiv.org/abs/2108.06098 and in https://arxiv.org/abs/2309.14859 Current implementation heavily borrows from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`LoKrConfig`]): The configuration of the LoKr model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The LoKr model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import LoKrModel, LoKrConfig >>> config_te = LoKrConfig( ... r=8, ... lora_alpha=32, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = LoKrConfig( ... r=8, ... lora_alpha=32, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... use_effective_conv2d=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = LoKrModel(model.text_encoder, config_te, "default") >>> model.unet = LoKrModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`LoKrConfig`]): The configuration of the LoKr model. """ prefix: str = "lokr_" layers_mapping: Dict[Type[torch.nn.Module], Type[LoKrLayer]] = { torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear, } def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LoKrLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str, **optional_kwargs, ) -> None: """ A private method to create and replace the target module with the adapter module. 
""" # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(chain(config.rank_pattern.keys(), config.alpha_pattern.keys())) target_name_key = next(filter(lambda key: re.match(f"(.*\.)?{key}$", current_key), pattern_keys), target_name) kwargs = config.to_dict() kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) kwargs["alpha"] = config.alpha_pattern.get(target_name_key, config.alpha) if isinstance(target, LoKrLayer): target.update_layer(adapter_name, **kwargs) else: new_module = self._create_new_module(config, adapter_name, target, **kwargs) self._replace_module(parent, target_name, new_module, target)
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lokr/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any, Optional, Set, Tuple, Union import torch import torch.nn as nn import torch.nn.functional as F from peft.tuners.lycoris_utils import LycorisLayer class LoKrLayer(nn.Module, LycorisLayer): # All names of layers that may contain adapter weights adapter_layer_names = ( "lokr_w1", "lokr_w1_a", "lokr_w1_b", "lokr_w2", "lokr_w2_a", "lokr_w2_b", "lokr_t2", ) # other_param_names is defined on parent class def __init__(self, base_layer: nn.Module) -> None: super().__init__() LycorisLayer.__init__(self, base_layer) # LoKr info self.lokr_w1 = nn.ParameterDict({}) self.lokr_w1_a = nn.ParameterDict({}) self.lokr_w1_b = nn.ParameterDict({}) self.lokr_w2 = nn.ParameterDict({}) self.lokr_w2_a = nn.ParameterDict({}) self.lokr_w2_b = nn.ParameterDict({}) self.lokr_t2 = nn.ParameterDict({}) @property def _available_adapters(self) -> Set[str]: return { *self.lokr_w1, *self.lokr_w1_a, *self.lokr_w1_b, *self.lokr_w2, *self.lokr_w2_a, *self.lokr_w2_b, *self.lokr_t2, } def create_adapter_parameters( self, adapter_name: str, r: int, shape, use_w1: bool, use_w2: bool, use_effective_conv2d: bool, ): if use_w1: self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0])) else: self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r)) self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0])) if len(shape) == 4: # Conv2d if use_w2: self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:])) elif use_effective_conv2d: self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3])) self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode else: self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r)) self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3])) else: # Linear if use_w2: self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1])) else: self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r)) self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) def reset_adapter_parameters(self, adapter_name: str): if adapter_name in self.lokr_w1: nn.init.zeros_(self.lokr_w1[adapter_name]) else: nn.init.zeros_(self.lokr_w1_a[adapter_name]) nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_w2: nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_t2: nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) def reset_adapter_parameters_random(self, adapter_name: str): if adapter_name in self.lokr_w1: 
nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_w2: nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5)) else: nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5)) nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5)) if adapter_name in self.lokr_t2: nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5)) def update_layer( self, adapter_name: str, r: int, alpha: float, rank_dropout: float, module_dropout: float, init_weights: bool, use_effective_conv2d: bool, decompose_both: bool, decompose_factor: int, **kwargs, ) -> None: """Internal function to create lokr adapter Args: adapter_name (`str`): Name for the adapter to add. r (`int`): Rank for the added adapter. alpha (`float`): Alpha for the added adapter. rank_dropout (`float`): The dropout probability for rank dimension during training module_dropout (`float`): The dropout probability for disabling adapter during training. init_weights (`bool`): Whether to initialize adapter weights. use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1. decompose_both (`bool`): Perform rank decomposition of left kronecker product matrix. decompose_factor (`int`): Kronecker product decomposition factor. """ self.r[adapter_name] = r self.alpha[adapter_name] = alpha self.scaling[adapter_name] = alpha / r self.rank_dropout[adapter_name] = rank_dropout self.module_dropout[adapter_name] = module_dropout base_layer = self.get_base_layer() # Determine shape of LoKr weights if isinstance(base_layer, nn.Linear): in_dim, out_dim = base_layer.in_features, base_layer.out_features in_m, in_n = factorization(in_dim, decompose_factor) out_l, out_k = factorization(out_dim, decompose_factor) shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2) use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2) use_effective_conv2d = False elif isinstance(base_layer, nn.Conv2d): in_dim, out_dim = base_layer.in_channels, base_layer.out_channels k_size = base_layer.kernel_size in_m, in_n = factorization(in_dim, decompose_factor) out_l, out_k = factorization(out_dim, decompose_factor) shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size) use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2) use_w2 = r >= max(shape[0][1], shape[1][1]) / 2 use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1) else: raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}") # Create weights with provided shape self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d) # Initialize weights if init_weights: self.reset_adapter_parameters(adapter_name) else: self.reset_adapter_parameters_random(adapter_name) # Move new weights to device weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def get_delta_weight(self, adapter_name: str) -> torch.Tensor: # 
https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224 if adapter_name in self.lokr_w1: w1 = self.lokr_w1[adapter_name] else: w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name] if adapter_name in self.lokr_w2: w2 = self.lokr_w2[adapter_name] elif adapter_name in self.lokr_t2: w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name]) else: w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name] # Make weights with Kronecker product weight = make_kron(w1, w2) weight = weight.reshape(self.get_base_layer().weight.shape) # Perform rank dropout during training - drop rows of addition weights rank_dropout = self.rank_dropout[adapter_name] if self.training and rank_dropout: drop = (torch.rand(weight.size(0)) > rank_dropout).float() drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device) drop /= drop.mean() weight *= drop return weight def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # Execute all the adapters for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue module_dropout = self.module_dropout[active_adapter] # Modify current execution weights if (not self.training) or (self.training and torch.rand(1) > module_dropout): result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs) result = result.to(previous_dtype) return result class Linear(LoKrLayer): """LoKr implemented in Linear layer""" def __init__( self, base_layer: nn.Module, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) # don't add bias here, because the bias is already included in the output of the base_layer return F.linear(input, delta_weight) def __repr__(self) -> str: rep = super().__repr__() return "lokr." 
+ rep class Conv2d(LoKrLayer): """LoKr implemented in Conv2d layer""" def __init__( self, base_layer: nn.Module, device: Optional[Union[str, torch.device]] = None, dtype: Optional[torch.dtype] = None, adapter_name: str = "default", r: int = 0, alpha: float = 0.0, rank_dropout: float = 0.0, module_dropout: float = 0.0, use_effective_conv2d: bool = False, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer( adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs ) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) # don't add bias here, because the bias is already included in the output of the base_layer base_layer = self.get_base_layer() return F.conv2d( input, delta_weight, stride=base_layer.stride, padding=base_layer.padding, dilation=base_layer.dilation, groups=base_layer.groups, ) def __repr__(self) -> str: rep = super().__repr__() return "lokr." + rep # Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11 def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]: """Factorizes the provided number into the product of two numbers Args: dimension (`int`): The number that needs to be factorized. factor (`int`, optional): Factorization divider. The algorithm will try to output two numbers, one of each will be as close to the factor as possible. If -1 is provided, the decomposition algorithm would try to search dividers near the square root of the dimension. Defaults to -1. Returns: Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is always less than or equal to the second. Example: ```py >>> factorization(256, factor=-1) (16, 16) >>> factorization(128, factor=-1) (8, 16) >>> factorization(127, factor=-1) (1, 127) >>> factorization(128, factor=4) (4, 32) ``` """ if factor > 0 and (dimension % factor) == 0: m = factor n = dimension // factor return m, n if factor == -1: factor = dimension m, n = 1, dimension length = m + n while m < n: new_m = m + 1 while dimension % new_m != 0: new_m += 1 new_n = dimension // new_m if new_m + new_n > length or new_m > factor: break else: m, n = new_m, new_n if m > n: n, m = m, n return m, n def make_weight_cp(t, wa, wb): rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2] return rebuild2 def make_kron(w1, w2, scale=1.0): if len(w2.shape) == 4: w1 = w1.unsqueeze(2).unsqueeze(2) w2 = w2.contiguous() rebuild = torch.kron(w1, w2) return rebuild * scale
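To make the shape bookkeeping in `update_layer` and `get_delta_weight` concrete, here is a small sketch (assuming a plain `nn.Linear(768, 768)` base layer and rank 8) that reuses the `factorization` and `make_kron` helpers defined above and checks that the Kronecker product of the two factors reshapes back to the base weight shape:

```py
import torch

in_dim, out_dim, r = 768, 768, 8
in_m, in_n = factorization(in_dim, factor=-1)     # (24, 32)
out_l, out_k = factorization(out_dim, factor=-1)  # (24, 32)

# w1 is the small left Kronecker factor; w2 is rebuilt from two rank-r matrices.
w1 = torch.randn(out_l, in_m)
w2 = torch.randn(out_k, r) @ torch.randn(r, in_n)

delta = make_kron(w1, w2)               # (out_l * out_k, in_m * in_n)
delta = delta.reshape(out_dim, in_dim)  # matches the base layer's weight shape
print(delta.shape)                      # torch.Size([768, 768])
```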
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/p_tuning/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptEncoderReparameterizationType(str, enum.Enum): MLP = "MLP" LSTM = "LSTM" @dataclass class PromptEncoderConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEncoder`]. Args: encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]): The type of reparameterization to use. encoder_hidden_size (`int`): The hidden size of the prompt encoder. encoder_num_layers (`int`): The number of layers of the prompt encoder. encoder_dropout (`float`): The dropout probability of the prompt encoder. """ encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field( default=PromptEncoderReparameterizationType.MLP, metadata={"help": "How to reparameterize the prompt encoder"}, ) encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the prompt encoder"}, ) encoder_num_layers: int = field( default=2, metadata={"help": "The number of layers of the prompt encoder"}, ) encoder_dropout: float = field( default=0.0, metadata={"help": "The dropout of the prompt encoder"}, ) def __post_init__(self): self.peft_type = PeftType.P_TUNING
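A short construction sketch for the config above, with illustrative hyperparameters. The reparameterization type can be passed either as the enum or as its string value, and `encoder_num_layers` is ignored for the MLP variant (see the warning in `PromptEncoder` below):

```py
from peft import PromptEncoderConfig
from peft.tuners.p_tuning import PromptEncoderReparameterizationType

cfg_mlp = PromptEncoderConfig(task_type="CAUSAL_LM", num_virtual_tokens=20, encoder_hidden_size=128)
cfg_lstm = PromptEncoderConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=20,
    encoder_hidden_size=128,
    encoder_reparameterization_type=PromptEncoderReparameterizationType.LSTM,
    encoder_dropout=0.1,
)
print(cfg_mlp.peft_type)  # PeftType.P_TUNING
```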
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/p_tuning/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import PromptEncoderConfig, PromptEncoderReparameterizationType from .model import PromptEncoder __all__ = ["PromptEncoder", "PromptEncoderConfig", "PromptEncoderReparameterizationType"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/p_tuning/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/modules/common/prompt_encoder.py # with some refactor import warnings import torch from .config import PromptEncoderConfig, PromptEncoderReparameterizationType class PromptEncoder(torch.nn.Module): """ The prompt encoder network that is used to generate the virtual token embeddings for p-tuning. Args: config ([`PromptEncoderConfig`]): The configuration of the prompt encoder. Example: ```py >>> from peft import PromptEncoder, PromptEncoderConfig >>> config = PromptEncoderConfig( ... peft_type="P_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... encoder_reparameterization_type="MLP", ... encoder_hidden_size=768, ... ) >>> prompt_encoder = PromptEncoder(config) ``` **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder. - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`. - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and `encoder_reparameterization_type="LSTM"`. - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model. - **input_size** (`int`) -- The input size of the prompt encoder. - **output_size** (`int`) -- The output size of the prompt encoder. - **hidden_size** (`int`) -- The hidden size of the prompt encoder. - **total_virtual_tokens** (`int`): The total number of virtual tokens of the prompt encoder. - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt encoder. 
Input shape: (`batch_size`, `total_virtual_tokens`) Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config): super().__init__() self.token_dim = config.token_dim self.input_size = self.token_dim self.output_size = self.token_dim self.hidden_size = config.encoder_hidden_size self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.encoder_type = config.encoder_reparameterization_type # embedding self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim) if not config.inference_mode: if self.encoder_type == PromptEncoderReparameterizationType.LSTM: lstm_dropout = config.encoder_dropout num_layers = config.encoder_num_layers # LSTM self.lstm_head = torch.nn.LSTM( input_size=self.input_size, hidden_size=self.hidden_size, num_layers=num_layers, dropout=lstm_dropout, bidirectional=True, batch_first=True, ) self.mlp_head = torch.nn.Sequential( torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size * 2, self.output_size), ) elif self.encoder_type == PromptEncoderReparameterizationType.MLP: encoder_num_layers_default = PromptEncoderConfig.encoder_num_layers if config.encoder_num_layers != encoder_num_layers_default: warnings.warn( f"for {self.encoder_type.value}, the argument `encoder_num_layers` is ignored. " f"Exactly {encoder_num_layers_default} MLP layers are used." ) layers = [ torch.nn.Linear(self.input_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.hidden_size), torch.nn.ReLU(), torch.nn.Linear(self.hidden_size, self.output_size), ] self.mlp_head = torch.nn.Sequential(*layers) else: raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") def forward(self, indices): input_embeds = self.embedding(indices) if self.encoder_type == PromptEncoderReparameterizationType.LSTM: output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0]) elif self.encoder_type == PromptEncoderReparameterizationType.MLP: output_embeds = self.mlp_head(input_embeds) else: raise ValueError("Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.") return output_embeds
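The Input/Output shapes documented above can be verified by instantiating the encoder directly (normally `get_peft_model` does this internally); the hyperparameters below are arbitrary:

```py
import torch
from peft import PromptEncoder, PromptEncoderConfig

config = PromptEncoderConfig(
    task_type="CAUSAL_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=768,
)
encoder = PromptEncoder(config)

# (batch_size, total_virtual_tokens) -> (batch_size, total_virtual_tokens, token_dim)
indices = torch.arange(config.num_virtual_tokens).unsqueeze(0)
print(encoder(indices).shape)  # torch.Size([1, 20, 768])
```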
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/mixed/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .model import COMPATIBLE_TUNER_TYPES, MixedModel __all__ = ["COMPATIBLE_TUNER_TYPES", "MixedModel"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/mixed/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from typing import Any, Optional, Union from torch import nn from tqdm import tqdm from peft.tuners import adalora, loha, lokr, lora, oft from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, PeftType, _get_submodules, get_auto_gptq_quant_linear, ) # Collection of constants used for all tuners COMPATIBLE_TUNER_TYPES = (PeftType.LORA, PeftType.LOHA, PeftType.LOKR, PeftType.ADALORA, PeftType.OFT) PREFIXES = [lora.LoraModel.prefix, lokr.LoKrModel.prefix, loha.LoHaModel.prefix, oft.OFTModel.prefix] Configs = Union[lora.LoraConfig, loha.LoHaConfig, lokr.LoKrConfig, adalora.AdaLoraConfig, oft.OFTConfig] Layers = (lora.layer.LoraLayer, loha.layer.LoHaLayer, lokr.layer.LoKrLayer, adalora.layer.AdaLoraLayer, oft.OFTLayer) class MixedModel(BaseTuner): """ A class that allows to mix different types of adapters in a single model. Note: This class should usually not be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. Args: model (:obj:`nn.Module`): The model to be tuned. config (:obj:`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (:obj:`str`): The name of the first adapter. """ def __init__(self, model: nn.Module, config: Configs, adapter_name: str) -> None: super().__init__(model, config, adapter_name) def _check_new_adapter_config(self, config: Configs) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ if not isinstance(config, Configs.__args__): raise ValueError( f"{self.__class__.__name__} only supports {COMPATIBLE_TUNER_TYPES} configs, but got {type(config)}." ) biases = (getattr(config, "bias", None) for config in self.peft_config) biases = [bias for bias in biases if bias not in (None, "none")] if len(biases) > 1: raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." 
) @staticmethod def _check_target_module_exists(config: Configs, key: str): return check_target_module_exists(config, key) def _create_and_replace( self, config: Configs, *args: Any, **kwargs: Any, ) -> None: if isinstance(config, adalora.AdaLoraConfig): adalora.AdaLoraModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, lora.LoraConfig): lora.LoraModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, loha.LoHaConfig): loha.LoHaModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, lokr.LoKrConfig): lokr.LoKrModel._create_and_replace(self, config, *args, **kwargs) elif isinstance(config, oft.OFTConfig): oft.OFTModel._create_and_replace(self, config, *args, **kwargs) else: raise ValueError(f"Unsupported config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") def _replace_module(self, parent, child_name, new_module, child) -> None: setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.get_base_layer() elif hasattr(child, "quant_linear_module"): # TODO maybe not necessary to have special treatment? child = child.quant_linear_module if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if any(prefix in name for prefix in PREFIXES): module.to(child.weight.device) if "ranknum" in name: module.to(child.weight.device) def _mark_only_adapters_as_trainable(self) -> None: for n, p in self.model.named_parameters(): if not any(prefix in n for prefix in PREFIXES): p.requires_grad = False for active_adapter in self.active_adapters: bias = getattr(self.peft_config[active_adapter], "bias", "none") if bias == "none": continue if bias == "all": for n, p in self.model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "lora_only": # TODO: check if this is needed for other supported types for m in self.model.modules(): if isinstance(m, Layers) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise ValueError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(config, adapter_name, target, **kwargs): gptq_quantization_config = kwargs.get("gptq_quantization_config", None) AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) if (gptq_quantization_config is not None) or (AutoGPTQQuantLinear is not None): raise ValueError(f"GPTQ quantization not supported for {config.peft_type.value} (yet).") loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) if loaded_in_8bit or loaded_in_4bit: raise ValueError(f"8bit and 4bit quantization not supported for {config.peft_type.value} (yet).") if isinstance(config, adalora.AdaLoraConfig): new_module = adalora.AdaLoraModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, lora.LoraConfig): new_module = lora.LoraModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, loha.LoHaConfig): new_module = 
loha.LoHaModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, lokr.LoKrConfig): new_module = lokr.LoKrModel._create_new_module(config, adapter_name, target, **kwargs) elif isinstance(config, oft.OFTConfig): new_module = oft.OFTModel._create_new_module(config, adapter_name, target, **kwargs) else: raise ValueError(f"Unknown config type {type(config)}, should be one of {COMPATIBLE_TUNER_TYPES}.") return new_module def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self): self._set_adapter_layers(enabled=True) def disable_adapter_layers(self): for active_adapter in self.active_adapters: val = getattr(self.peft_config[active_adapter], "bias", "none") if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: for module in self.model.modules(): if isinstance(module, Layers): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge layers when the model is gptq quantized") def merge_recursively(module): # helper function to recursively merge the base_layer of the target path = [] layer = module while hasattr(layer, "base_layer"): path.append(layer) layer = layer.base_layer for layer_before, layer_after in zip(path[:-1], path[1:]): layer_after.merge(safe_merge=safe_merge, adapter_names=adapter_names) layer_before.base_layer = layer_after.base_layer module.merge(safe_merge=safe_merge, adapter_names=adapter_names) key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: merge_recursively(target) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def add_weighted_adapter(self, *args: Any, **kwargs: Any) -> None: raise NotImplementedError(f"Weighted adapters are not supported for {self.__class__.__name__} (yet).") def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Deletes an existing adapter. 
Args: adapter_name (Union[str, list[str]]): Name of the adapter(s) to delete. """ if isinstance(adapter_name, str): adapter_names = [adapter_name] else: adapter_names = adapter_name mismatched = set(adapter_names) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) for adapter_name in adapter_names: del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if not any(prefix in key for prefix in PREFIXES)] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, BaseTunerLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> nn.Module: r""" This method merges the layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def generate(self, *args: Any, **kwargs: Any): return self.model.generate(*args, **kwargs)
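The class docstring notes that `MixedModel` is normally created through `get_peft_model(..., mixed=True)` rather than instantiated directly. A hedged sketch of combining two compatible adapter types on one backbone is shown below; the checkpoint name and target modules are placeholders:

```py
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, LoraConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")  # placeholder checkpoint
lora_cfg = LoraConfig(r=8, target_modules=["q_proj", "v_proj"])
loha_cfg = LoHaConfig(r=8, target_modules=["q_proj", "v_proj"])

# mixed=True returns a wrapper that can hold adapters of different (compatible) types.
model = get_peft_model(base, lora_cfg, adapter_name="lora", mixed=True)
model.add_adapter("loha", loha_cfg)
model.set_adapter(["lora", "loha"])  # activate both adapters at once
```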
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prefix_tuning/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from peft.config import PromptLearningConfig from peft.utils import PeftType @dataclass class PrefixTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PrefixEncoder`]. Args: encoder_hidden_size (`int`): The hidden size of the prompt encoder. prefix_projection (`bool`): Whether to project the prefix embeddings. """ encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the encoder"}, ) prefix_projection: bool = field( default=False, metadata={"help": "Whether to project the prefix tokens"}, ) def __post_init__(self): self.peft_type = PeftType.PREFIX_TUNING
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prefix_tuning/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import PrefixTuningConfig from .model import PrefixEncoder __all__ = ["PrefixTuningConfig", "PrefixEncoder"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/prefix_tuning/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py # with some refactor import torch class PrefixEncoder(torch.nn.Module): r""" The `torch.nn` model to encode the prefix. Args: config ([`PrefixTuningConfig`]): The configuration of the prefix encoder. Example: ```py >>> from peft import PrefixEncoder, PrefixTuningConfig >>> config = PrefixTuningConfig( ... peft_type="PREFIX_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... encoder_hidden_size=768, ... ) >>> prefix_encoder = PrefixEncoder(config) ``` **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder. - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if `prefix_projection` is `True`. - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings. Input shape: (`batch_size`, `num_virtual_tokens`) Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`) """ def __init__(self, config): super().__init__() self.prefix_projection = config.prefix_projection token_dim = config.token_dim num_layers = config.num_layers encoder_hidden_size = config.encoder_hidden_size num_virtual_tokens = config.num_virtual_tokens if self.prefix_projection and not config.inference_mode: # Use a two-layer MLP to encode the prefix self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim) self.transform = torch.nn.Sequential( torch.nn.Linear(token_dim, encoder_hidden_size), torch.nn.Tanh(), torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim), ) else: self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim) def forward(self, prefix: torch.Tensor): if self.prefix_projection: prefix_tokens = self.embedding(prefix) past_key_values = self.transform(prefix_tokens) else: past_key_values = self.embedding(prefix) return past_key_values
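The documented output shape, (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`), can be checked directly by instantiating the encoder; the numbers below are arbitrary:

```py
import torch
from peft import PrefixEncoder, PrefixTuningConfig

config = PrefixTuningConfig(
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_layers=12,
    encoder_hidden_size=768,
    prefix_projection=True,
)
encoder = PrefixEncoder(config)

prefix = torch.arange(config.num_virtual_tokens).unsqueeze(0)  # (batch_size=1, num_virtual_tokens)
print(encoder(prefix).shape)  # torch.Size([1, 20, 18432]) == (1, 20, 2 * 12 * 768)
```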
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adaption_prompt/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import namedtuple from dataclasses import dataclass, field from peft.config import PeftConfig from peft.utils import PeftType from .utils import llama_compute_query_states @dataclass class AdaptionPromptConfig(PeftConfig): """Stores the configuration of an [`AdaptionPromptModel`].""" target_modules: str = field( default=None, metadata={"help": "Name of the attention submodules to insert adaption prompts into."} ) adapter_len: int = field(default=None, metadata={"help": "Number of adapter tokens to insert"}) adapter_layers: int = field(default=None, metadata={"help": "Number of adapter layers (from the top)"}) def __post_init__(self): self.peft_type = PeftType.ADAPTION_PROMPT @property def is_adaption_prompt(self) -> bool: """Return True if this is an adaption prompt config.""" return True # Contains the config that is specific to a transformers model type. ModelTypeConfig = namedtuple( "ModelTypeConfig", ["compute_query_states", "target_modules", "k_proj_layer", "v_proj_layer", "o_proj_layer"] ) # Mapping of transformers model types to their specific configuration. TRANSFORMERS_MODEL_CONFIG = { "llama": ModelTypeConfig( compute_query_states=llama_compute_query_states, target_modules="self_attn", k_proj_layer="k_proj", v_proj_layer="v_proj", o_proj_layer="o_proj", ), } def prepare_config( peft_config: AdaptionPromptConfig, model, ) -> AdaptionPromptConfig: """Prepare the config based on the llama model type.""" if model.config.model_type not in TRANSFORMERS_MODEL_CONFIG: raise ValueError("Unsupported model type for adaption prompt: '{model.config.model_type}'.") model_config = TRANSFORMERS_MODEL_CONFIG[model.config.model_type] if peft_config.target_modules is None: peft_config.target_modules = model_config.target_modules return peft_config
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adaption_prompt/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import AdaptionPromptConfig from .layer import AdaptedAttention from .model import AdaptionPromptModel __all__ = ["AdaptionPromptConfig", "AdaptedAttention", "AdaptionPromptModel"]
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adaption_prompt/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, List import torch.nn as nn from peft.utils import _freeze_adapter, _get_submodules from .config import AdaptionPromptConfig, prepare_config from .layer import AdaptedAttention from .utils import is_adaption_prompt_trainable class AdaptionPromptModel(nn.Module): """ Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf. The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert trainable prompts with gates (for zero init). Notes on the multi-adapter pattern: - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter name. - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them in the dictionary, and replace them with the modules of the new adapter. - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the dictionary. - Disabling the adapter would also result in the modules being removed from the model. """ def __init__(self, model, configs: Dict, adapter_name: str): super().__init__() self.model = model # Store adapter configs by name. self.peft_config: Dict[str, AdaptionPromptConfig] = {} # Store lists of the parents of the affected attention modules by adapter name. # We keep references to the parents so we can swap the adapters in-and-out of the model. self._parents: Dict[str, List[nn.Module]] = {} # Store lists of cached AdaptedAttention modules by name. self._cached_adapters: Dict[str, List] = {} # The name of the currently active adapter. self._active_adapter = None # Whether the adapter is enabled. self._enabled = True self.forward = self.model.forward self.add_adapter(adapter_name, configs[adapter_name]) self._mark_only_adaption_prompts_as_trainable() def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None: """Add an adapter with the given name and config.""" config = prepare_config(config, self.model) if adapter_name in self.peft_config: raise ValueError(f"Adapter with name '{adapter_name}' already exists.") parents = [] for name, _ in self.model.named_modules(): if name.endswith(config.target_modules): par, _, _ = _get_submodules(self.model, name) parents.append(par) if len(parents) < config.adapter_layers: raise ValueError( f"Config specifies more adapter layers '{config.adapter_layers}'" f" than the model has '{len(parents)}'." ) # Note that if the target modules are not in Sequential, ModuleList, or # some other PyTorch ordered container, the behavior is undefined as we # assume here that the order of the modules is the same as the order of # the transformer decoder layers. parents = parents[-config.adapter_layers :] self._parents[adapter_name] = parents # It is only None during initialization. # If it is disabled, we don't have to remove the modules. 
if self._active_adapter is not None and self._enabled: self._remove_adapted_attentions(self._active_adapter) self._active_adapter = adapter_name self.peft_config[adapter_name] = config self._create_adapted_attentions(config, parents) if not self._enabled: self._remove_adapted_attentions(self._active_adapter) if config.inference_mode: _freeze_adapter(self.model, adapter_name) def set_adapter(self, adapter_name: str) -> None: """Set the model to use the adapter with the given name.""" if self._active_adapter == adapter_name: return if adapter_name not in self.peft_config: raise ValueError(f"Adapter with name '{adapter_name}' does not exist.") if self._enabled: self._remove_adapted_attentions(self._active_adapter) self._set_adapted_attentions(adapter_name) self._active_adapter = adapter_name def enable_adapter_layers(self): """Enable adapter layers by swapping in cached AdaptedAttention modules.""" self._enabled = True self._set_adapted_attentions(self._active_adapter) def disable_adapter_layers(self): """Disable adapter layers by swapping out AdaptedAttention modules.""" self._enabled = False self._remove_adapted_attentions(self._active_adapter) def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None: """Wrap LlamaAttention modules with newly created AdaptedAttention modules.""" for par in parents: attn = AdaptedAttention( model_type=self.model.config.model_type, adapter_len=config.adapter_len, model=getattr(par, config.target_modules), ) setattr(par, config.target_modules, attn) def _set_adapted_attentions(self, adapter_name: str) -> None: """Replace LlamaAttention modules with cached AdaptedAttention modules.""" cached = self._cached_adapters[adapter_name] del self._cached_adapters[adapter_name] config = self.peft_config[adapter_name] for i, par in enumerate(self._parents[adapter_name]): setattr(par, config.target_modules, cached[i]) def _remove_adapted_attentions(self, adapter_name: str) -> None: """Remove AdaptedAttention modules from the model and store them in the cache.""" config = self.peft_config[adapter_name] adapted_attentions = [] for par in self._parents[adapter_name]: attn = getattr(par, config.target_modules) adapted_attentions.append(attn) setattr(par, config.target_modules, attn.model) self._cached_adapters[adapter_name] = adapted_attentions def _mark_only_adaption_prompts_as_trainable(self) -> None: """Freeze all parameters of the model except the adaption prompts.""" for n, p in self.model.named_parameters(): if not is_adaption_prompt_trainable(n): p.requires_grad = False def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: # This is necessary as e.g. causal models have various methods that we # don't want to re-implement here. return getattr(self.model, name)
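A hedged end-to-end sketch of using the adaption prompt tuner through `get_peft_model`; the checkpoint name is a placeholder and must be a llama-type model, since that is the only entry in `TRANSFORMERS_MODEL_CONFIG`:

```py
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, get_peft_model

model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")  # placeholder llama checkpoint
config = AdaptionPromptConfig(
    task_type="CAUSAL_LM",
    adapter_len=10,     # number of learnable prompt tokens inserted per adapted layer
    adapter_layers=30,  # the top 30 decoder layers get AdaptedAttention wrappers
)
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
```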
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adaption_prompt/utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn def llama_rotate_half(x: torch.Tensor) -> torch.Tensor: """ Rotate half the hidden dims of the input. This function was duplicated verbatim from: https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L126 This was done to eliminate the Llama transformers implementation as a dependency of this file. Note that some other functions were also adapted from the transformers implementation but were modified. """ x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def llama_apply_rotary_pos_emb(q, cos, sin, position_ids): """ Apply rotary position embedding to query states in the Llama model. This function was adapted from: https://github.com/huggingface/transformers/blob/1de8ce9ee1191ba761a593ac15d9ccbf5851bfc5/src/transformers/models/llama/modeling_llama.py#L133 It was modified to remove unnecessary processing of key states. The method is compatible with transformers <= 4.34.2 and also with the latest version (>=4.35). """ # In previous transformers version cos/sin cached had a shape of 4D if len(cos.shape) == 4: gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) # In the new version, it is 2D so we fall back to the new implementation # https://github.com/huggingface/transformers/blame/eef7ea98c31a333bacdc7ae7a2372bde772be8e4/src/transformers/models/llama/modeling_llama.py#L222-L226 else: cos = cos[position_ids].unsqueeze(1) sin = sin[position_ids].unsqueeze(1) q_embed = (q * cos) + (llama_rotate_half(q) * sin) return q_embed def llama_compute_query_states(model: nn.Module, **kwargs) -> torch.Tensor: """ Compute query states for Llama models specifically. They need to be recomputed as the forward() method of the original LlamaModel in the transformers library does not return them. 
See the related discussion in the PR: https://github.com/huggingface/peft/pull/268 """ hidden_states = kwargs.get("hidden_states") position_ids = kwargs.get("position_ids") past_key_value = kwargs.get("past_key_value") bsz, q_len, _ = hidden_states.size() query_states = model.q_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2) value_states = model.v_proj(hidden_states).view(bsz, q_len, model.num_heads, model.head_dim).transpose(1, 2) seq_len = q_len if past_key_value is not None: seq_len += past_key_value[0].shape[-2] cos, sin = model.rotary_emb(value_states, seq_len=seq_len) return llama_apply_rotary_pos_emb(query_states, cos, sin, position_ids) def is_adaption_prompt_trainable(params: str) -> bool: """Return True if module is trainable under adaption prompt fine-tuning.""" return params.split(".")[-1].startswith("adaption_")
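A tiny numeric check of `llama_rotate_half` defined above: the second half of the last dimension is negated and moved in front of the first half:

```py
import torch

# Reuses llama_rotate_half() from this module.
x = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
print(llama_rotate_half(x))  # tensor([[-3., -4.,  1.,  2.]])
```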
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adaption_prompt/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import torch import torch.nn as nn import torch.nn.functional as F from .config import TRANSFORMERS_MODEL_CONFIG class AdaptedAttention(nn.Module): """This module wraps a LLamaAttention module and injects adaption prompts.""" def __init__(self, model_type: str, adapter_len: int, model): """ Initialize object. Args: model_type: The transformer model type. This is used to retrieve the right method to compute query states. adapter_len: The length of the adaption prompt to insert. model: The original transformer attention module that is being wrapped. """ assert not isinstance(model, AdaptedAttention) super().__init__() self.model_type = model_type self.model = model self.adapter_len = adapter_len # Assume all parameters of the attention model we are wrapping are on the same device. device = next(model.parameters()).device # Don't think this was specified in the paper, but we follow the official repo which used an Embedding # which initializes the tokens with standard normal values. # https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L234 # (bsz, adapter_len, hidden_size) target_dtype = ( model.q_proj.weight.dtype if model.q_proj.weight.dtype not in [torch.int8, torch.uint8] else torch.float32 ) self.adaption_prompt = nn.Parameter( torch.empty(1, adapter_len, self.model.hidden_size, device=device, dtype=target_dtype).normal_() ) # Initialize the gate to 0 as this is "zero-init". self.adaption_gate = nn.Parameter(torch.zeros(1, device=device, dtype=target_dtype)) def forward(self, **kwargs): """ Forward pass for the adapter which wraps the original LlamaAttention module. "Official" paper implementation: https://github.com/ZrrSkywalker/LLaMA-Adapter/blob/41c3546fe1997ab8a65809dc8d8f9252b19d9faf/llama/model.py#L141 Args: kwargs: See the original LlamaAttention module. """ if kwargs.get("output_attention", False): raise NotImplementedError("output_attention is not currently supported.") output, _, past_key_value = self.model(**kwargs) bsz = output.shape[0] q_len = output.shape[1] embed_dim = output.shape[2] k_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].k_proj_layer v_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].v_proj_layer o_proj_layer = TRANSFORMERS_MODEL_CONFIG[self.model_type].o_proj_layer if k_proj_layer == v_proj_layer: _, key, value = getattr(self.model, k_proj_layer)(self.adaption_prompt).split(embed_dim, dim=2) else: key = getattr(self.model, k_proj_layer)(self.adaption_prompt) value = getattr(self.model, v_proj_layer)(self.adaption_prompt) # (bsz, num_heads, adapter_len, head_dim) adapter_k = ( key.view(1, self.adapter_len, self.model.num_heads, self.model.head_dim) .repeat(bsz, 1, 1, 1) .transpose(1, 2) ) # (bsz, num_heads, adapter_len, head_dim) adapter_v = ( value.view(1, self.adapter_len, self.model.num_heads, self.model.head_dim) .repeat(bsz, 1, 1, 1) .transpose(1, 2) ) # Recompute query states. 
compute_query_states = TRANSFORMERS_MODEL_CONFIG[self.model_type].compute_query_states # (bsz, num_heads, q_len, head_dim) query_states = compute_query_states(model=self.model, **kwargs) previous_dtype = query_states.dtype # (bsz, num_heads, q_len, adapter_len) scores = torch.matmul(query_states, adapter_k.transpose(2, 3).to(previous_dtype)) / math.sqrt( self.model.head_dim ) # Upcast attention to fp32 # (bsz, num_heads, q_len, adapter_len) scores = self.adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32).to(previous_dtype) # (bsz, q_len, num_heads * head_dim) adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1) # (bsz, q_len, hidden_size) if o_proj_layer is not None: adapter_output = getattr(self.model, o_proj_layer)(adapter_output) # Add adaption prompt output to original output. output = output + adapter_output # Restore original dtype. output = output.to(previous_dtype) return output, None, past_key_value
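The key property of the zero-initialized `adaption_gate` above is that the adapter branch contributes nothing to the attention output until the gate is trained away from zero. A standalone sketch of that gating arithmetic, with made-up shapes, is shown below:

```py
import math

import torch
import torch.nn.functional as F

bsz, num_heads, q_len, adapter_len, head_dim = 2, 4, 6, 10, 64
query_states = torch.randn(bsz, num_heads, q_len, head_dim)
adapter_k = torch.randn(bsz, num_heads, adapter_len, head_dim)
adapter_v = torch.randn(bsz, num_heads, adapter_len, head_dim)
adaption_gate = torch.zeros(1)  # "zero-init" gate

scores = torch.matmul(query_states, adapter_k.transpose(2, 3)) / math.sqrt(head_dim)
scores = adaption_gate * F.softmax(scores, dim=-1, dtype=torch.float32)
adapter_output = torch.matmul(scores, adapter_v).transpose(1, 2).reshape(bsz, q_len, -1)

print(adapter_output.abs().max())  # tensor(0.) -> no contribution at initialization
```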
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/ia3/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class IA3Config(PeftConfig): """ This is the configuration class to store the configuration of a [`IA3Model`]. Args: target_modules (`Union[List[str],str]`): The names of the modules to apply (IA)^3 to. feedforward_modules (`Union[List[str],str]`): The names of the modules to be treated as feedforward modules, as in the original paper. These modules will have (IA)^3 vectors multiplied to the input, instead of the output. feedforward_modules must be a name or a subset of names present in target_modules. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. modules_to_save (`List[str]`): List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. init_ia3_weights (`bool`): Whether to initialize the vectors in the (IA)^3 layers, defaults to `True`. """ target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with ia3." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) feedforward_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or a regex expression of module names which are feedforward" "For example, ['output.dense']" }, ) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from (IA)^3 layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_ia3_weights: bool = field( default=True, metadata={"help": "Whether to initialize the vectors in the (IA)^3 layers."}, ) def __post_init__(self): self.peft_type = PeftType.IA3 self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.feedforward_modules = ( set(self.feedforward_modules) if isinstance(self.feedforward_modules, list) else self.feedforward_modules ) # check if feedforward_modules is a subset of target_modules. run the check only if both are sets if isinstance(self.feedforward_modules, set) and isinstance(self.target_modules, set): if not self.feedforward_modules.issubset(self.target_modules): raise ValueError("`feedforward_modules` should be a subset of `target_modules`")
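A brief construction sketch for the config above, with illustrative module names; `__post_init__` enforces that `feedforward_modules` is a subset of `target_modules`:

```py
from peft import IA3Config

config = IA3Config(
    task_type="SEQ_2_SEQ_LM",
    target_modules=["k", "v", "wo"],
    feedforward_modules=["wo"],  # must be a subset of target_modules
)

# Violating the subset constraint raises a ValueError in __post_init__:
# IA3Config(target_modules=["k", "v"], feedforward_modules=["wo"])
```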
0
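The configuration above only records which modules receive (IA)^3 vectors; a minimal sketch of constructing it for a T5-style model follows (the module names `k`, `v`, `wo` are assumptions about the base architecture, not taken from this file):

```python
# Minimal sketch: (IA)^3 on the key/value projections plus the feedforward
# "wo" projection of a T5-style model. "wo" is also listed in
# feedforward_modules, so its learned vector rescales the layer *input*;
# __post_init__ above enforces that it is a subset of target_modules.
from peft import IA3Config, TaskType

ia3_config = IA3Config(
    task_type=TaskType.SEQ_2_SEQ_LM,
    target_modules=["k", "v", "wo"],
    feedforward_modules=["wo"],
)
```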
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/ia3/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .config import IA3Config from .layer import Conv2d, IA3Layer, Linear from .model import IA3Model __all__ = ["Conv2d", "IA3Config", "IA3Layer", "IA3Model", "Linear"] def __getattr__(name): if (name == "Linear8bitLt") and is_bnb_available(): from .bnb import Linear8bitLt return Linear8bitLt if (name == "Linear4bit") and is_bnb_4bit_available(): from .bnb import Linear4bit return Linear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
0
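The module-level `__getattr__` above keeps `bitsandbytes` out of the import path until one of the quantized classes is actually requested. A standalone sketch of that lazy-import pattern, with hypothetical names (`heavy_backend`, `FastLinear`) standing in for the optional dependency:

```python
# PEP 562 lazy import: the optional dependency is only imported when the
# attribute is first accessed. "heavy_backend" and "FastLinear" are
# hypothetical placeholders, not real packages.
import importlib.util


def _backend_available() -> bool:
    return importlib.util.find_spec("heavy_backend") is not None


def __getattr__(name):
    if name == "FastLinear" and _backend_available():
        from heavy_backend import FastLinear  # imported only on demand
        return FastLinear
    raise AttributeError(f"module {__name__} has no attribute {name}")
```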
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/ia3/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import re import warnings from dataclasses import asdict from enum import Enum from typing import List, Optional import torch from transformers.pytorch_utils import Conv1D from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING, TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules, ) from .layer import Conv2d, IA3Layer, Linear class IA3Model(BaseTuner): """ Creates a Infused Adapter by Inhibiting and Amplifying Inner Activations ((IA)^3) model from a pretrained transformers model. The method is described in detail in https://arxiv.org/abs/2205.05638 Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. config ([`IA3Config`]): The configuration of the (IA)^3 model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The (IA)^3 model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM, ia3Config >>> from peft import IA3Model, IA3Config >>> config = IA3Config( ... peft_type="IA3", ... task_type="SEQ_2_SEQ_LM", ... target_modules=["k", "v", "w0"], ... feedforward_modules=["w0"], ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> ia3_model = IA3Model(config, model) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`ia3Config`]): The configuration of the (IA)^3 model. 
""" prefix: str = "ia3_" def __init__(self, model, config, adapter_name): super().__init__(model, config, adapter_name) @staticmethod def _create_new_module(ia3_config, adapter_name, target, **kwargs): # avoid eager bnb import if is_bnb_available(): import bitsandbytes as bnb from .bnb import Linear8bitLt if is_bnb_4bit_available(): from .bnb import Linear4bit loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) is_feedforward = kwargs.pop("is_feedforward", False) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "memory_efficient_backward": target.state.memory_efficient_backward, "threshold": target.state.threshold, "index": target.index, } ) new_module = Linear8bitLt(target, adapter_name, is_feedforward=is_feedforward, **eightbit_kwargs) elif loaded_in_4bit and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target.compute_dtype, "compress_statistics": target.weight.compress_statistics, "quant_type": target.weight.quant_type, } ) new_module = Linear4bit(target, adapter_name, is_feedforward=is_feedforward, **fourbit_kwargs) elif isinstance(target, torch.nn.Conv2d): new_module = Conv2d(target, adapter_name, is_feedforward=is_feedforward, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = False new_module = Linear(target, adapter_name, is_feedforward=is_feedforward, **kwargs) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = ia3_config.fan_in_fan_out = True new_module = Linear( target, adapter_name, is_feedforward=is_feedforward, is_target_conv_1d_layer=True, **kwargs ) else: raise ValueError( f"Target module {target} is not supported. " f"Currently, only `torch.nn.Linear`, `torch.nn.Conv2d`, and `Conv1D` are supported." 
) return new_module @staticmethod def _check_target_module_exists(ia3_config, key): return check_target_module_exists(ia3_config, key) def _mark_only_adapters_as_trainable(self) -> None: for n, p in self.model.named_parameters(): if self.prefix not in n: p.requires_grad = False def _create_and_replace( self, ia3_config, adapter_name, target, target_name, parent, **optional_kwargs, ): loaded_in_8bit = optional_kwargs["loaded_in_8bit"] loaded_in_4bit = optional_kwargs["loaded_in_4bit"] current_key = optional_kwargs["current_key"] # check if target module is in feedforward_modules is_feedforward = self._check_target_module_feedforward(ia3_config, current_key) kwargs = { "fan_in_fan_out": ia3_config.fan_in_fan_out, "init_ia3_weights": ia3_config.init_ia3_weights, "loaded_in_8bit": loaded_in_8bit, "loaded_in_4bit": loaded_in_4bit, "is_feedforward": is_feedforward, } if isinstance(target, Conv2d): target.update_layer( adapter_name, ia3_config.init_ia3_weights, ) elif isinstance(target, Linear): target.update_layer( adapter_name, ia3_config.init_ia3_weights, ) else: new_module = self._create_new_module(ia3_config, adapter_name, target, **kwargs) if adapter_name != self.active_adapter: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) @staticmethod def _check_target_module_feedforward(ia3_config, key) -> bool: """ A helper private method that checks if the target module `key` matches with a feedforward module specified in `ia3_config` """ if isinstance(ia3_config.feedforward_modules, str): is_feedforward = bool(re.fullmatch(ia3_config.feedforward_modules, key)) else: is_feedforward = any(key.endswith(target_key) for target_key in ia3_config.feedforward_modules) return is_feedforward def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer # layers with base_layer don't need the weight to be copied, as they have a reference already if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if self.prefix in name: module.to(child.weight.device) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled=True): for module in self.model.modules(): if isinstance(module, (IA3Layer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. 
When disabling all adapters, the model output corresponds to the output of the base model. """ self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, IA3Layer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) def _prepare_adapter_config(self, peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING[model_config["model_type"]] if peft_config.feedforward_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING: raise ValueError("Please specify `feedforward_modules` in `peft_config`") peft_config.feedforward_modules = TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING[ model_config["model_type"] ] return peft_config def _unload_and_optionally_merge( self, merge: bool = True, safe_merge: bool = False, adapter_names: Optional[List[str]] = None ): r""" This method merges the (IA)^3 layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: safe_merge (`bool`, `optional`, defaults to `False`): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if getattr(self.model, "is_loaded_in_8bit", False): raise ValueError("Cannot merge ia3 layers when the model is loaded in 8-bit mode") if getattr(self.model, "is_loaded_in_4bit", False): raise ValueError("Cannot merge ia3 layers when the model is loaded in 4-bit mode") self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] for key in key_list: try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def merge_and_unload(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> torch.nn.Module: r""" This method merges the IA³ layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. 
Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge(safe_merge=safe_merge, adapter_names=adapter_names) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the IA³ modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False) def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, IA3Layer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or []
0
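`IA3Model` is usually not instantiated directly; a hedged end-to-end sketch of the common entry point via `get_peft_model` (assuming the `t5-small` checkpoint is available):

```python
# get_peft_model builds the IA3Model wrapper internally; merge_and_unload
# later folds the learned vectors back into the base weights so the result
# is a plain transformers model again.
from transformers import AutoModelForSeq2SeqLM
from peft import IA3Config, TaskType, get_peft_model

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
peft_model = get_peft_model(base_model, IA3Config(task_type=TaskType.SEQ_2_SEQ_LM))
peft_model.print_trainable_parameters()

# ... training loop ...

merged_model = peft_model.merge_and_unload()  # standalone model for inference
```

Because (IA)^3 merging is a pointwise multiplication of the weights, the merged model adds no inference latency compared to the base model.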
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/ia3/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Any, List, Optional import torch import torch.nn as nn from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import transpose class IA3Layer(BaseTunerLayer): # All names of layers that may contain adapter weights adapter_layer_names = ("ia3_l",) def __init__(self, base_layer: nn.Module, is_feedforward: bool, **kwargs) -> None: self.base_layer = base_layer self.ia3_l = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.is_feedforward = is_feedforward base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Embedding): in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def update_layer(self, adapter_name, init_ia3_weights): # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features)) else: weight = torch.randn((self.out_features, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self.to(self.get_base_layer().weight.device) self.set_adapter(self.active_adapters) def reset_ia3_parameters(self, adapter_name): if adapter_name in self.ia3_l.keys(): # initialize learned vector with torch.ones nn.init.constant_(self.ia3_l[adapter_name], 1.0) class Linear(nn.Module, IA3Layer): # (IA)^3 implemented in a dense layer def __init__( self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer is_target_conv_1d_layer: bool = False, # whether target module is a conv1d layer. 
useful while unloading later init_ia3_weights: bool = True, # whether to initialize IA3 weights **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) self.fan_in_fan_out = fan_in_fan_out self.is_target_conv_1d_layer = is_target_conv_1d_layer self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def update_layer(self, adapter_name, init_ia3_weights): # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features)) else: weight = torch.randn((self.out_features, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self.to(self.get_base_layer().weight.device) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) if safe_merge: orig_weights = base_layer.weight.data orig_weights = torch.mul(orig_weights, ia3_l) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_l) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. 
Nothing to do.") return warnings.warn("Unmerge result can be inaccurate for (IA)^3.") while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() # Add tolerace to avoid division by zero ia3_l = transpose(self.ia3_l[active_adapter].data, self.fan_in_fan_out) + 1e-8 base_layer.weight.data = torch.div(base_layer.weight.data, ia3_l) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.div(base_layer.bias.data, scaling.data + 1e-8) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: dtype = previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue dtype = self.ia3_l[active_adapter].dtype ia3_scaling *= self.ia3_l[active_adapter].flatten() if self.is_feedforward: x = x.to(dtype) # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype # e.g. bf16 vs fp32. Is that okay? interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) result = self.base_layer(interm, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result = result.to(dtype) * ia3_scaling result = result.to(previous_dtype) return result class Conv2d(nn.Module, IA3Layer): def __init__( self, base_layer: nn.Module, adapter_name: str, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_feedforward: bool = False, # Set to True if the layer is treated as a feedforward layer init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def update_layer(self, adapter_name, init_ia3_weights): # Actual trainable parameters if self.is_feedforward: weight = torch.randn((1, self.in_features, 1, 1)) else: weight = torch.randn((1, self.out_features, 1, 1)) self.ia3_l[adapter_name] = nn.Parameter(weight) if init_ia3_weights: self.reset_ia3_parameters(adapter_name) self.to(self.get_base_layer().weight.device) self.set_adapter(self.active_adapters) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." 
) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() ia3_scaling = self.ia3_l[active_adapter].data if not self.is_feedforward: ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) if safe_merge: output_weight = torch.mul(base_layer.weight.data, ia3_scaling).clone() if not torch.isfinite(output_weight).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = output_weight else: base_layer.weight.data = torch.mul(base_layer.weight.data, ia3_scaling) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return warnings.warn("Unmerge result can be inaccurate for (IA)^3.") while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.ia3_l.keys(): base_layer = self.get_base_layer() # divide by (IA)^3 vector. Add tolerace to avoid division by zero ia3_scaling = self.ia3_l[active_adapter].data if not self.is_feedforward: ia3_scaling = ia3_scaling.permute(1, 0, 2, 3) base_layer.weight.data = torch.div(base_layer.weight.data, ia3_scaling + 1e-8) if not self.is_feedforward and (base_layer.bias is not None): scaling = self.ia3_l[active_adapter].reshape(base_layer.bias.shape) base_layer.bias.data = torch.mul(base_layer.bias.data, scaling.data) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: dtype = previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue dtype = self.ia3_l[active_adapter].dtype ia3_scaling *= self.ia3_l[active_adapter] if self.is_feedforward: x = x.to(dtype) # TODO: weight.dtype can be != self.ia3_l[self.active_adapters].dtype # e.g. bf16 vs fp32. Is that okay? interm = (x * ia3_scaling).to(self.get_base_layer().weight.dtype) result = self.base_layer(interm, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) result = result.to(dtype) * ia3_scaling result = result.to(previous_dtype) return result
0
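At the core of the layer above is an elementwise rescaling by a vector initialized to ones. A small numerical sketch in plain `torch` (independent of peft) showing why `merge` for a non-feedforward `Linear` reduces to multiplying each weight row, and the bias, by the learned vector:

```python
# Rescaling the *output* of a linear layer by a vector l is the same as
# multiplying row i of the weight (and bias entry i) by l[i] -- which is
# what Linear.merge() above does for non-feedforward modules.
import torch

torch.manual_seed(0)
W = torch.randn(4, 3)   # (out_features, in_features)
b = torch.randn(4)
l = torch.rand(4)       # learned (IA)^3 vector
x = torch.randn(3)

scaled_output = (W @ x + b) * l                   # adapter applied at runtime
merged_output = (W * l.unsqueeze(1)) @ x + b * l  # vector merged into the weights
assert torch.allclose(scaled_output, merged_output, atol=1e-6)
```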
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/ia3/bnb.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .layer import IA3Layer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, IA3Layer): # (IA)^3 implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self.update_layer(adapter_name, init_ia3_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) if self.disable_adapters: return self.base_layer(x) ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue ia3_scaling *= self.ia3_l[active_adapter].flatten() requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) if requires_conversion: x = x.float() if self.is_feedforward: result = self.base_layer(x * ia3_scaling) expected_dtype = result.dtype else: result = self.base_layer(x) expected_dtype = result.dtype result = result * ia3_scaling if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "ia3." + rep if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, IA3Layer): # IA3 implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self.update_layer(adapter_name, init_ia3_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) if self.disable_adapters: return self.base_layer(x) ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue ia3_scaling *= self.ia3_l[active_adapter].flatten() requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) if requires_conversion: x = x.float() if self.is_feedforward: result = self.base_layer(x * ia3_scaling) expected_dtype = result.dtype else: result = self.base_layer(x) expected_dtype = result.dtype result = result * ia3_scaling result = result.clone() # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch. # This has been duplicated here. if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "ia3." + rep
0
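A hedged sketch of pairing these quantized layers with a 4-bit base model; the checkpoint name and the OPT module names (`q_proj`, `v_proj`, `fc2`) are assumptions, and a CUDA build of `bitsandbytes` is required. As the comments above note, merging is not supported for the quantized variants.

```python
# (IA)^3 on a 4-bit base model: the adapter vectors stay in full precision
# while the frozen base weights are quantized by bitsandbytes.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import IA3Config, TaskType, get_peft_model

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",
    quantization_config=BitsAndBytesConfig(load_in_4bit=True),
    torch_dtype=torch.float16,
)
config = IA3Config(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["q_proj", "v_proj", "fc2"],
    feedforward_modules=["fc2"],
)
peft_model = get_peft_model(model, config)
peft_model.print_trainable_parameters()
```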
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adalora/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional from peft.tuners.lora import LoraConfig from peft.utils import PeftType @dataclass class AdaLoraConfig(LoraConfig): """ This is the configuration class to store the configuration of a [`~peft.AdaLora`]. Args: target_r (`int`): The target average rank of incremental matrix. init_r (`int`): The initial rank for each incremental matrix. tinit (`int`): The steps of initial fine-tuning warmup. tfinal (`int`): The step of final fine-tuning. deltaT (`int`): The time interval between two budget allocations. beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. beta2 (`float`): The hyperparameter of EMA for uncertainty quantification. orth_reg_weight (`float`): The coefficient of orthogonal regularization. total_step (`int`): The total training steps that should be specified before training. rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator. """ target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."}) tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) def __post_init__(self): self.peft_type = PeftType.ADALORA
0
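A minimal sketch of how the schedule parameters above fit together: ranks start at `init_r`, are pruned toward `target_r` between step `tinit` and step `total_step - tfinal`, and the budget is re-allocated every `deltaT` steps (the step counts here are illustrative):

```python
# AdaLoRA budget schedule: warm up for 200 steps at rank 12, prune down to an
# average rank of 4, then spend the last 500 steps fine-tuning with the final
# rank pattern fixed. total_step must be set before training starts.
from peft import AdaLoraConfig, TaskType

adalora_config = AdaLoraConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    init_r=12,
    target_r=4,
    tinit=200,
    tfinal=500,
    deltaT=10,
    total_step=3000,
    target_modules=["q", "v"],  # assumed T5-style module names
)
```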
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adalora/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .config import AdaLoraConfig from .gptq import SVDQuantLinear from .layer import AdaLoraLayer, RankAllocator, SVDLinear from .model import AdaLoraModel __all__ = ["AdaLoraConfig", "AdaLoraLayer", "AdaLoraModel", "SVDLinear", "RankAllocator", "SVDQuantLinear"] def __getattr__(name): if (name == "SVDLinear8bitLt") and is_bnb_available(): from .bnb import SVDLinear8bitLt return SVDLinear8bitLt if (name == "SVDLinear4bit") and is_bnb_4bit_available(): from .bnb import SVDLinear4bit return SVDLinear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adalora/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings import torch from transformers.pytorch_utils import Conv1D from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.lora import LoraConfig, LoraModel from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils import ( TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING, _freeze_adapter, _get_submodules, get_auto_gptq_quant_linear, get_quantization_config, ) from .gptq import SVDQuantLinear from .layer import AdaLoraLayer, RankAllocator, SVDLinear class AdaLoraModel(LoraModel): """ Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. Paper: https://openreview.net/forum?id=lq62uWRJjiY Args: model ([`transformers.PreTrainedModel`]): The model to be adapted. config ([`AdaLoraConfig`]): The configuration of the AdaLora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The AdaLora model. Example:: >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig >>> config = AdaLoraConfig( peft_type="ADALORA", task_type="SEQ_2_SEQ_LM", r=8, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.01, ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> model = AdaLoraModel(model, config, "default") **Attributes**: - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model. """ # Note: don't redefine prefix here, it should be inherited from LoraModel def __init__(self, model, config, adapter_name): super().__init__(model, config, adapter_name) traininable_mode_counter = 0 for config in self.peft_config.values(): if not config.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( "AdaLoraModel supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one you want to train." ) if self.peft_config[adapter_name].inference_mode: _freeze_adapter(self.model, adapter_name) else: self.trainable_adapter_name = adapter_name self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name) def _check_new_adapter_config(self, config: LoraConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ super()._check_new_adapter_config(config) traininable_mode_counter = 0 for config_ in self.peft_config.values(): if not config_.inference_mode: traininable_mode_counter += 1 if traininable_mode_counter > 1: raise ValueError( f"{self.__class__.__name__} supports only 1 trainable adapter. " "When using multiple adapters, set inference_mode to True for all adapters except the one " "you want to train." 
) def _create_and_replace( self, lora_config, adapter_name, target, target_name, parent, **optional_kwargs, ): loaded_in_8bit = optional_kwargs.get("loaded_in_8bit", False) loaded_in_4bit = optional_kwargs.get("loaded_in_4bit", False) if (loaded_in_8bit or loaded_in_4bit) and not is_bnb_available(): raise ImportError( "To use AdaLora with 8-bit quantization, please install the `bitsandbytes` package. " "You can install it with `pip install bitsandbytes`." ) kwargs = { "r": lora_config.init_r, "lora_alpha": lora_config.lora_alpha, "lora_dropout": lora_config.lora_dropout, "fan_in_fan_out": lora_config.fan_in_fan_out, "init_lora_weights": lora_config.init_lora_weights, "loaded_in_8bit": loaded_in_8bit, "loaded_in_4bit": loaded_in_4bit, } quantization_config = get_quantization_config(self.model, method="gptq") if quantization_config is not None: kwargs["gptq_quantization_config"] = quantization_config # If it is not an AdaLoraLayer, create a new module, else update it with new adapters if not isinstance(target, AdaLoraLayer): new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs) if adapter_name != self.active_adapter: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) else: target.update_layer( adapter_name, lora_config.init_r, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) @staticmethod def _create_new_module(lora_config, adapter_name, target, **kwargs): # avoid eager bnb import if is_bnb_available(): import bitsandbytes as bnb from .bnb import SVDLinear8bitLt if is_bnb_4bit_available(): from .bnb import SVDLinear4bit gptq_quantization_config = kwargs.get("gptq_quantization_config", None) AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "memory_efficient_backward": target.state.memory_efficient_backward, "threshold": target.state.threshold, "index": target.index, } ) new_module = SVDLinear8bitLt(target, adapter_name, **kwargs) elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target.compute_dtype, "compress_statistics": target.weight.compress_statistics, "quant_type": target.weight.quant_type, } ) new_module = SVDLinear4bit(target, adapter_name, **fourbit_kwargs) elif AutoGPTQQuantLinear is not None and isinstance(target, AutoGPTQQuantLinear): new_module = SVDQuantLinear(target, adapter_name, **kwargs) else: if isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True else: raise ValueError( f"Target module {target} is not supported. 
" f"Currently, only `torch.nn.Linear` and `Conv1D` are supported." ) new_module = SVDLinear(target, adapter_name, **kwargs) return new_module @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[ model_config["model_type"] ] return peft_config def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def forward(self, *args, **kwargs): outputs = self.model.forward(*args, **kwargs) if (getattr(outputs, "loss", None) is not None) and isinstance(outputs.loss, torch.Tensor): # Calculate the orthogonal regularization orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight if orth_reg_weight <= 0: raise ValueError("orth_reg_weight should be greater than 0. ") regu_loss = 0 num_param = 0 for n, p in self.model.named_parameters(): if ("lora_A" in n or "lora_B" in n) and self.trainable_adapter_name in n: para_cov = p @ p.T if "lora_A" in n else p.T @ p I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov)) I.requires_grad = False num_param += 1 regu_loss += torch.norm(para_cov - I, p="fro") if num_param > 0: regu_loss = regu_loss / num_param else: regu_loss = 0 outputs.loss += orth_reg_weight * regu_loss return outputs def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name): lora_config = self.peft_config[adapter_name] for name, rank_idx in rank_pattern.items(): if isinstance(rank_idx, list): rank = sum(rank_idx) elif isinstance(rank_idx, torch.Tensor): rank_idx = rank_idx.view(-1) rank = rank_idx.sum().item() else: raise ValueError("Unexcepted type of rank_idx") key = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) _, target, _ = _get_submodules(self.model, key) lora_E_weights = target.lora_E[adapter_name][rank_idx] lora_A_weights = target.lora_A[adapter_name][rank_idx] lora_B_weights = target.lora_B[adapter_name][:, rank_idx] ranknum = target.ranknum[adapter_name] target.update_layer( adapter_name, rank, lora_config.lora_alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) with torch.no_grad(): if rank > 0: target.lora_E[adapter_name].copy_(lora_E_weights) target.lora_A[adapter_name].copy_(lora_A_weights) target.lora_B[adapter_name].copy_(lora_B_weights) # The scaling is exactly as the previous target.ranknum[adapter_name].copy_(ranknum) def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name): for name, rank_idx in rank_pattern.items(): rank = sum(rank_idx) prefix = ".".join(name.split(".")[0:-2]) if adapter_name in name else ".".join(name.split(".")[0:-1]) for layer in ["lora_E", "lora_A", "lora_B"]: key = f"base_model.model.{prefix}.{layer}.{adapter_name}" if layer != "lora_B": state_dict[key] = ( state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key] ) else: state_dict[key] = ( state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key] ) return state_dict def update_and_allocate(self, global_step): lora_config = self.peft_config[self.trainable_adapter_name] # Update the importance score and allocate the budget if global_step < lora_config.total_step - 
lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step) if rank_pattern: lora_config.rank_pattern = rank_pattern # Finalize the budget allocation elif global_step == lora_config.total_step - lora_config.tfinal: _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True) # for some reason, this freezes the trainable parameters and nothing gets updated # self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name) lora_config.rank_pattern = rank_pattern self.rankallocator.reset_ipt() # Currently using an inefficient way to mask the unimportant weights using the rank pattern # due to the problem mentioned above elif global_step > lora_config.total_step - lora_config.tfinal: self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern) # Pass the function and do forward propagation else: return None
0
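Unlike plain LoRA, AdaLoRA needs a hook in the training loop so the `RankAllocator` can read the gradients and re-allocate the rank budget. A hedged sketch of that extra step for a peft-wrapped model (`model`, `optimizer`, and `batch` are placeholders):

```python
def adalora_training_step(model, optimizer, batch, global_step):
    """One training step for an AdaLoRA peft model (sketch)."""
    outputs = model(**batch)      # forward() above adds the orthogonal penalty
    outputs.loss.backward()
    optimizer.step()
    # Re-allocate the rank budget from the freshly computed gradients,
    # before they are cleared.
    model.base_model.update_and_allocate(global_step)
    optimizer.zero_grad()
    return outputs.loss.detach()
```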
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adalora/gptq.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from .layer import AdaLoraLayer class SVDQuantLinear(torch.nn.Module, AdaLoraLayer): def __init__( self, base_layer, adapter_name, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() AdaLoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def forward(self, x: torch.Tensor) -> torch.Tensor: result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] lora_E = self.lora_E[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] ranknum = self.ranknum[active_adapter] + 1e-5 requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype if x.dtype != torch.float32: x = x.float() output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum # TODO: here, the dtype conversion is applied on the *whole expression*, # not the intermediate result, unlike for SVDLinear8bitLT and # SVDLinear4bit, is that correct? if requires_conversion: output = output.to(expected_dtype) result += output return result def __repr__(self) -> str: rep = super().__repr__() return "adalora." + rep
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adalora/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import Any, List, Optional import torch from torch import nn from peft.tuners.lora import LoraLayer from peft.utils import transpose class AdaLoraLayer(LoraLayer): # List all names of layers that may contain adapter weights # Note: ranknum doesn't need to be included as it is not an nn.Module adapter_layer_names = ("lora_A", "lora_B", "lora_E", "lora_embedding_A", "lora_embedding_B") # other_param_names is defined in LoraLayer def __init__(self, base_layer: nn.Module) -> None: super().__init__(base_layer) self.lora_E = nn.ParameterDict({}) self.lora_A = nn.ParameterDict({}) self.lora_B = nn.ParameterDict({}) self.ranknum = nn.ParameterDict({}) def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters # Right singular vectors self.lora_A[adapter_name] = nn.Parameter(torch.randn(r, self.in_features)) # Singular values self.lora_E[adapter_name] = nn.Parameter(torch.randn(r, 1)) # Left singular vectors self.lora_B[adapter_name] = nn.Parameter(torch.randn(self.out_features, r)) # The current rank self.ranknum[adapter_name] = nn.Parameter(torch.randn(1), requires_grad=False) self.ranknum[adapter_name].data.fill_(float(r)) self.ranknum[adapter_name].requires_grad = False self.scaling[adapter_name] = lora_alpha if lora_alpha > 0 else float(r) if init_lora_weights: self.reset_lora_parameters(adapter_name) if hasattr(self.get_base_layer(), "qweight"): # QuantLinear self.to(self.get_base_layer().qweight.device) else: self.to(self.get_base_layer().weight.device) self.set_adapter(self.active_adapters) def reset_lora_parameters(self, adapter_name): if adapter_name in self.lora_A.keys(): nn.init.normal_(self.lora_E[adapter_name], mean=0.0, std=0.02) nn.init.normal_(self.lora_A[adapter_name], mean=0.0, std=0.02) nn.init.normal_(self.lora_B[adapter_name], mean=0.0, std=0.02) class SVDLinear(nn.Module, AdaLoraLayer): # SVD-based adaptation by a dense layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() AdaLoraLayer.__init__(self, base_layer) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a 
copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: base_layer = self.get_base_layer() if active_adapter in self.lora_A.keys(): if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: return ( transpose(self.lora_B[adapter] @ (self.lora_A[adapter] * self.lora_E[adapter]), self.fan_in_fan_out) * self.scaling[adapter] / (self.ranknum[adapter] + 1e-5) ) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # TODO: SVDLinear does not convert dtype, unlike lora linear, is that correct? if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] lora_E = self.lora_E[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] ranknum = self.ranknum[active_adapter] + 1e-5 result += (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum return result def __repr__(self) -> str: rep = super().__repr__() return "adalora." + rep class RankAllocator: """ The RankAllocator for AdaLoraModel. Paper: https://openreview.net/pdf?id=lq62uWRJjiY Args: config ([`AdaLoraConfig`]): The configuration of the AdaLora model. model: the model that we apply AdaLoRA to. 
""" def __init__(self, model, peft_config, adapter_name): self.peft_config = peft_config self.adapter_name = adapter_name self.beta1 = peft_config.beta1 self.beta2 = peft_config.beta2 assert self.beta1 > 0 and self.beta1 < 1 assert self.beta2 > 0 and self.beta2 < 1 self.reset_ipt() self._set_budget_scheduler(model) def set_total_step(self, total_step): self.peft_config.total_step = total_step def reset_ipt(self): self.ipt = {} self.exp_avg_ipt = {} self.exp_avg_unc = {} def _set_budget_scheduler(self, model): self.init_bgt = 0 self.name_set = set() for n, p in model.named_parameters(): if f"lora_A.{self.adapter_name}" in n: self.init_bgt += p.size(0) self.name_set.add(n.replace("lora_A", "%s")) self.name_set = sorted(self.name_set) # The total final rank budget self.target_bgt = self.peft_config.target_r * len(self.name_set) def budget_schedule(self, step: int): tinit = self.peft_config.tinit tfinal = self.peft_config.tfinal total_step = self.peft_config.total_step # Initial warmup if step <= tinit: budget = self.init_bgt mask_ind = False # Final fine-tuning elif step > total_step - tfinal: budget = self.target_bgt mask_ind = True else: # Budget decreasing with a cubic scheduler mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit) budget = int((self.init_bgt - self.target_bgt) * (mul_coeff**3) + self.target_bgt) mask_ind = True if step % self.peft_config.deltaT == 0 else False return budget, mask_ind def update_ipt(self, model): # Update the sensitivity and uncertainty for every weight for n, p in model.named_parameters(): if "lora_" in n and self.adapter_name in n: if n not in self.ipt: self.ipt[n] = torch.zeros_like(p) self.exp_avg_ipt[n] = torch.zeros_like(p) self.exp_avg_unc[n] = torch.zeros_like(p) with torch.no_grad(): self.ipt[n] = (p * p.grad).abs().detach() # Sensitivity smoothing self.exp_avg_ipt[n] = self.beta1 * self.exp_avg_ipt[n] + (1 - self.beta1) * self.ipt[n] # Uncertainty quantification self.exp_avg_unc[n] = ( self.beta2 * self.exp_avg_unc[n] + (1 - self.beta2) * (self.ipt[n] - self.exp_avg_ipt[n]).abs() ) def _element_score(self, n): return self.exp_avg_ipt[n] * self.exp_avg_unc[n] def _combine_ipt(self, ipt_E, ipt_AB): ipt_AB = ipt_AB.sum(dim=1, keepdim=False) sum_ipt = ipt_E.view(-1) + ipt_AB.view(-1) return sum_ipt def mask_to_budget(self, model, budget): value_ipt = {} vector_ipt = {} triplet_ipt = {} # Get the importance score for A, E, B for n, p in model.named_parameters(): if f"lora_A.{self.adapter_name}" in n: entry_ipt = self._element_score(n) comb_ipt = torch.mean(entry_ipt, dim=1, keepdim=True) name_m = n.replace("lora_A", "%s") if name_m not in vector_ipt: vector_ipt[name_m] = [comb_ipt] else: vector_ipt[name_m].append(comb_ipt) if f"lora_B.{self.adapter_name}" in n: entry_ipt = self._element_score(n) comb_ipt = torch.mean(entry_ipt, dim=0, keepdim=False).view(-1, 1) name_m = n.replace("lora_B", "%s") if name_m not in vector_ipt: vector_ipt[name_m] = [comb_ipt] else: vector_ipt[name_m].append(comb_ipt) if f"lora_E.{self.adapter_name}" in n: entry_ipt = self._element_score(n) name_m = n.replace("lora_E", "%s") value_ipt[name_m] = entry_ipt all_score = [] # Calculate the score for each triplet for name_m in vector_ipt: ipt_E = value_ipt[name_m] ipt_AB = torch.cat(vector_ipt[name_m], dim=1) sum_ipt = self._combine_ipt(ipt_E, ipt_AB) name_E = name_m % "lora_E" triplet_ipt[name_E] = sum_ipt.view(-1, 1) all_score.append(sum_ipt.view(-1)) # Get the threshold by ranking ipt mask_threshold = torch.kthvalue( torch.cat(all_score), k=self.init_bgt - budget, 
)[0].item() rank_pattern = {} # Mask the unimportant triplets with torch.no_grad(): for n, p in model.named_parameters(): if f"lora_E.{self.adapter_name}" in n: p.masked_fill_(triplet_ipt[n] <= mask_threshold, 0.0) rank_pattern[n] = (~(triplet_ipt[n] <= mask_threshold)).view(-1).tolist() return rank_pattern def update_and_allocate(self, model, global_step, force_mask=False): # # Update the importance score and allocate the budget if global_step < self.peft_config.total_step - self.peft_config.tfinal: self.update_ipt(model) budget, mask_ind = self.budget_schedule(global_step) # Allocate the budget according to importance scores if mask_ind or force_mask: rank_pattern = self.mask_to_budget(model, budget) else: rank_pattern = None return budget, rank_pattern def mask_using_rank_pattern(self, model, rank_pattern): # Mask the unimportant triplets is_adapter_name_truncated = False if self.adapter_name not in next(iter(rank_pattern.keys())): is_adapter_name_truncated = True with torch.no_grad(): for n, p in model.named_parameters(): if f"lora_E.{self.adapter_name}" in n: key = n if not is_adapter_name_truncated else n.replace(f".{self.adapter_name}", "") mask = torch.Tensor(rank_pattern[key]).unsqueeze(-1).to(p.device) p.masked_fill_(~mask.bool(), 0.0)
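# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): the cubic budget schedule
# implemented by RankAllocator.budget_schedule above can be hard to picture, so
# this standalone sketch restates it with made-up hyperparameters (tinit, tfinal,
# total_step, init_bgt and target_bgt below are hypothetical values, not defaults).
def _sketch_budget_schedule(step, tinit=200, tfinal=500, total_step=3000, init_bgt=576, target_bgt=192):
    if step <= tinit:  # initial warmup: keep the full rank budget
        return init_bgt
    if step > total_step - tfinal:  # final fine-tuning: budget pinned at the target
        return target_bgt
    # in between, interpolate cubically from init_bgt down to target_bgt
    mul_coeff = 1 - (step - tinit) / (total_step - tfinal - tinit)
    return int((init_bgt - target_bgt) * (mul_coeff**3) + target_bgt)

# e.g. _sketch_budget_schedule(0) == 576, _sketch_budget_schedule(1500) == 223,
# and _sketch_budget_schedule(2600) == 192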
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/adalora/bnb.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .layer import AdaLoraLayer if is_bnb_available(): class SVDLinear8bitLt(torch.nn.Module, AdaLoraLayer): # Low-rank matrix for SVD-based adaptation def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() AdaLoraLayer.__init__(self, base_layer) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def forward(self, x: torch.Tensor) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) result = self.base_layer(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype if x.dtype != torch.float32: x = x.float() lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] lora_E = self.lora_E[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] ranknum = self.ranknum[active_adapter] + 1e-5 output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T if requires_conversion: output = output.to(expected_dtype) output = output * scaling / ranknum # inplace operation on view is forbidden for MatMul8bitLtBackward, so avoid it result = result + output return result def __repr__(self) -> str: rep = super().__repr__() return "adalora." + rep if is_bnb_4bit_available(): class SVDLinear4bit(torch.nn.Module, AdaLoraLayer): # Low-rank matrix for SVD-based adaptation def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() AdaLoraLayer.__init__(self, base_layer) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) result = self.base_layer(x, *args, **kwargs) if self.disable_adapters: return result # As per Tim Dettmers, for 4bit, we need to defensively clone here. # The reason is that in some cases, an error can occur that backprop # does not work on a manipulated view. This issue may be solved with # newer PyTorch versions but this would need extensive testing to be # sure. 
result = result.clone() for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] lora_E = self.lora_E[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] ranknum = self.ranknum[active_adapter] + 1e-5 requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = lora_A.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) output = dropout(x) @ (lora_A * lora_E).T @ lora_B.T if requires_conversion: output = output.to(expected_dtype) output = output * scaling / ranknum result += output return result def __repr__(self) -> str: rep = super().__repr__() return "adalora." + rep
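# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): both quantized SVDLinear
# variants above add the same low-rank AdaLoRA update, dropout(x) @ (A * E)^T @ B^T,
# scaled by scaling / ranknum, on top of the 8-bit / 4-bit base output. A minimal
# dense sketch of that delta path with hypothetical sizes, assuming lora_A has shape
# (r, in_features), lora_E has shape (r, 1) and lora_B has shape (out_features, r),
# as they are created in AdaLoraLayer.update_layer:
import torch

batch, in_features, out_features, r = 2, 16, 32, 4
x = torch.randn(batch, in_features)
lora_A = torch.randn(r, in_features)
lora_E = torch.randn(r, 1)  # per-rank "singular values"; the RankAllocator zeroes pruned entries
lora_B = torch.randn(out_features, r)
scaling, ranknum = 1.0, float(r)

delta = (x @ (lora_A * lora_E).T @ lora_B.T) * scaling / (ranknum + 1e-5)
assert delta.shape == (batch, out_features)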
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/multitask_prompt_tuning/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Optional, Union from peft.tuners.prompt_tuning import PromptTuningConfig from peft.utils import PeftType class MultitaskPromptTuningInit(str, enum.Enum): # initialize prompt with text TEXT = "TEXT" # initialize prompt with random matrix RANDOM = "RANDOM" # average the prefix and column matrices obtained during source training AVERAGE_SOURCE_TASKS = "AVERAGE_SOURCE_TASKS" # pick prefix and column matrices for a particular task obtained during source training EXACT_SOURCE_TASK = "EXACT_SOURCE_TASK" # only use the prompt embeddings trained during source training ONLY_SOURCE_SHARED = "ONLY_SOURCE_SHARED" @dataclass class MultitaskPromptTuningConfig(PromptTuningConfig): prompt_tuning_init: Union[MultitaskPromptTuningInit, str] = field( default=MultitaskPromptTuningInit.RANDOM, metadata={ "help": ( "How to initialize the prompt tuning parameters. Can be one of TEXT, RANDOM, AVERAGE_SOURCE_TASKS, " "EXACT_SOURCE_TASK, ONLY_SOURCE_SHARED." ), }, ) prompt_tuning_init_state_dict_path: Optional[str] = field( default=None, metadata={ "help": ( "The path of source state dict. This is required when training the downstream target prompt from " "the pretrained source prompt" ), }, ) prompt_tuning_init_task: Optional[int] = field(default=0, metadata={"help": "source task id for initialization"}) num_ranks: Optional[int] = field(default=1, metadata={"help": "ranks"}) num_tasks: Optional[int] = field(default=1, metadata={"help": "number of tasks"}) def __post_init__(self): self.peft_type = PeftType.MULTITASK_PROMPT_TUNING
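# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): a hedged sketch of how this
# config might be instantiated when training a target task from a previously
# trained source prompt. The checkpoint path and the numeric values are
# hypothetical; task_type and num_virtual_tokens come from the parent
# PromptTuningConfig.
from peft import MultitaskPromptTuningConfig, MultitaskPromptTuningInit, TaskType

config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    num_virtual_tokens=50,
    num_tasks=1,
    num_ranks=1,
    prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
    prompt_tuning_init_task=0,  # which source task's prefix matrices to copy
    prompt_tuning_init_state_dict_path="source_prompts/state_dict.bin",  # hypothetical path
)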
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/multitask_prompt_tuning/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit from .model import MultitaskPromptEmbedding __all__ = ["MultitaskPromptTuningConfig", "MultitaskPromptTuningInit", "MultitaskPromptEmbedding"]
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/multitask_prompt_tuning/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from peft.tuners.prompt_tuning import PromptEmbedding from peft.utils import TaskType from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit # This code is adapted for the paper: https://arxiv.org/abs/2303.02861 and # constitutes the work done at MIT-IBM Watson Research Lab. class MultitaskPromptEmbedding(PromptEmbedding): def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings): super().__init__(config, word_embeddings) self.num_tasks = config.num_tasks self.num_ranks = config.num_ranks self.num_virtual_tokens = config.num_virtual_tokens self.num_transformer_submodules = config.num_transformer_submodules if self.num_transformer_submodules is None: self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 self.token_dim = config.token_dim total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules self.prefix_task_cols = torch.nn.Parameter( torch.normal( mean=0, std=0.02, size=(self.num_tasks, total_virtual_tokens, self.num_ranks), ) ) self.prefix_task_rows = torch.nn.Parameter( torch.normal( mean=0, std=0.02, size=(self.num_tasks, self.num_ranks, self.token_dim), ) ) if config.prompt_tuning_init in [ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, MultitaskPromptTuningInit.ONLY_SOURCE_SHARED, ]: if config.prompt_tuning_init_state_dict_path is None: raise ValueError( f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} " "init method" ) state_dict: dict = torch.load( config.prompt_tuning_init_state_dict_path, map_location=word_embeddings.device, ) if config.prompt_tuning_init in [ MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS, MultitaskPromptTuningInit.EXACT_SOURCE_TASK, ]: prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"] prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"] if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS: prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True) prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True) elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK: prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0) prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0) state_dict = { "embedding.weight": state_dict["prompt_embeddings"], "prefix_task_cols": prefix_task_cols_, "prefix_task_rows": prefix_task_rows_, } self.load_state_dict(state_dict, strict=True) elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED: state_dict = { "embedding.weight": state_dict["prompt_embeddings"], } self.load_state_dict(state_dict, strict=False) def forward(self, indices, task_ids): if task_ids is None: raise ValueError("task_ids cannot be None") prompt_embeddings = self.embedding(indices) task_cols = 
torch.index_select(self.prefix_task_cols, 0, task_ids) task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids) task_prompts = torch.matmul(task_cols, task_rows) prompt_embeddings *= task_prompts return prompt_embeddings
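# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): forward() above scales a shared
# prompt of shape (total_virtual_tokens, token_dim) elementwise by a per-task low-rank
# matrix task_cols @ task_rows. A minimal shape check with made-up sizes and a batch
# dimension assumed on the selected task_ids:
import torch

num_tasks, total_virtual_tokens, num_ranks, token_dim, batch = 4, 20, 2, 8, 3
prefix_task_cols = torch.randn(num_tasks, total_virtual_tokens, num_ranks)
prefix_task_rows = torch.randn(num_tasks, num_ranks, token_dim)
shared_prompt = torch.randn(batch, total_virtual_tokens, token_dim)  # stands in for embedding(indices)

task_ids = torch.tensor([0, 2, 1])
task_cols = torch.index_select(prefix_task_cols, 0, task_ids)  # (batch, tokens, ranks)
task_rows = torch.index_select(prefix_task_rows, 0, task_ids)  # (batch, ranks, dim)
task_prompts = torch.matmul(task_cols, task_rows)               # (batch, tokens, dim)
assert (shared_prompt * task_prompts).shape == (batch, total_virtual_tokens, token_dim)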
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import List, Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class LoftQConfig: """ This is the sub-configuration class to store the configuration of a [`LoraModel`]. Args: bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}. bits (`int`): Quantization bits for LoftQ. iter (`int`): Alternating iterations for LoftQ. fake (`bool`): True: use fp16/fp32; used for first time to save weights. False: use bitsandbytes 4bit linear models. weights can't be saved. Recommend to set to True, save the weights and load the saved weights in 4 bits. """ loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"}) loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"}) @dataclass class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension. target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to. lora_alpha (`int`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for Lora. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. layers_to_transform (`Union[List[int],int]`): The layer indexes to transform, if this argument is specified, it will apply the LoRA transformations on the layer indexes that are specified in this list. If a single integer is passed, it will apply the LoRA transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer pattern is not in the common layers pattern. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. 
""" r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with Lora." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: str = field(default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"}) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: bool | Literal["gaussian", "loftq"] = field( default=True, metadata={ "help": ( "How to initialize the weights of the LoRA layers. Passing True (default) results in the default " "initialization from the reference implementation from Microsoft. Passing 'gaussian' results " "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization " "to False leads to completely random initialization and is discouraged." "Pass `'loftq'` to use LoftQ initialization" ), }, ) layers_to_transform: Optional[Union[List[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " "This only works when target_modules is a list of str." }, ) layers_pattern: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." "This only works when target_modules is a list of str." }, ) rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) megatron_config: Optional[dict] = field( default=None, metadata={ "help": ( "The TransformerConfig from Megatron, it is used to create LoRA's parallel linear layer." "You can get it like this, `core_transformer_config_from_args(get_args())`, " "this two functions are from Megatron." "You need to specify this parameter when you want to loraize the ColumnParallelLinear and " "RowParallelLinear layers of megatron." "It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` " "functions, because TransformerConfig may not necessarily be serialized." 
"But when using megatron, we can use `get_peft_model_state_dict` function and " "megatron's framework, they can also save and load models and configurations." ) }, ) megatron_core: Optional[str] = field( default="megatron.core", metadata={ "help": ( "The core module from Megatron, it is used to judge and create LoRA's parallel linear layer. " "It only needs to be passed in when you need to use your own modified megatron core module. " "Otherwise, it will use the default value `megatron.core`. " ) }, ) # dict type is used when loading config.json loftq_config: Union[LoftQConfig, dict] = field( default_factory=dict, metadata={ "help": ( "The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone " "weights and initialize Lora layers." ) }, ) def __post_init__(self): self.peft_type = PeftType.LORA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") # handle init_lora_weights and loftq_config if self.init_lora_weights == "loftq": import importlib if not importlib.util.find_spec("scipy"): raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") if self.loftq_config is None: raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.") # convert loftq_config to dict if self.loftq_config is not None and not isinstance(self.loftq_config, dict): self.loftq_config = vars(self.loftq_config)
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/tp_layer.py
from typing import Any import torch import torch.nn as nn import torch.nn.init as init from .layer import LoraLayer class LoraParallelLinear(nn.Module, LoraLayer): """ When the target layer parallel_linear is RowParallelLinear, in order to keep the input and output shapes consistent, we need to split the lora matrix A into rows, and the lora_B at this time should be a complete linear layer; In the same way, when the target layer is ColumnParallelLinear, we perform column segmentation on lora_B, while lora_A is still a complete linear layer. """ def __init__( self, base_layer, adapter_name: str, backend, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, init_lora_weights: bool = True, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer=base_layer) self.backend = backend self.is_paralle_a = isinstance(base_layer, backend.RowParallelLinear) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name megatron_config = kwargs["megatron_config"] parallel_linear_kwargs = {"megatron_config": megatron_config} init_method = init.xavier_normal_ if hasattr(megatron_config, "init_method"): init_method = megatron_config.init_method input_is_parallel = True gather_output = False if isinstance(base_layer, self.backend.RowParallelLinear): input_is_parallel = base_layer.input_is_parallel else: gather_output = base_layer.gather_output self.update_layer( adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, init_method, input_is_parallel, gather_output, **parallel_linear_kwargs, ) self.is_target_conv_1d_layer = False def update_layer( self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights, init_method=init.xavier_normal_, input_is_parallel=True, gather_output=False, **parallel_linear_kwargs, ): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer megatron_config = parallel_linear_kwargs["megatron_config"] # lora needs to be forced to upgrade to 32-bit precision, otherwise it will overflow megatron_config.params_dtype = torch.float32 if self.is_paralle_a: lora_a = self.backend.RowParallelLinear( input_size=self.in_features, output_size=r, bias=False, input_is_parallel=input_is_parallel, skip_bias_add=True, init_method=init_method, config=megatron_config, ) lora_b = nn.Linear(in_features=r, out_features=self.out_features, bias=False, dtype=torch.float32) else: lora_a = nn.Linear(in_features=self.in_features, out_features=r, bias=False, dtype=torch.float32) lora_b = self.backend.ColumnParallelLinear( input_size=r, output_size=self.out_features, bias=False, gather_output=gather_output, init_method=init_method, config=megatron_config, ) self.lora_A[adapter_name] = lora_a self.lora_B[adapter_name] = lora_b self.scaling[adapter_name] = lora_alpha / r if init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any): previous_dtype = x.dtype # If weight is used for 
matrix multiplication here, the final aggregation operation of the original # parallel_linear layer will be missing, so we need to directly call its forward function to obtain the # output of the original parallel_linear layer. if self.disable_adapters: if self.merged: self.unmerge() result, bias = self.base_layer(x, *args, **kwargs) elif self.merged: result, bias = self.base_layer(x, *args, **kwargs) else: result, bias = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) lora_result = lora_A(dropout(x)) if isinstance(lora_result, tuple): lora_result = lora_result[0] lora_result = lora_B(lora_result) if isinstance(lora_result, tuple): lora_result = lora_result[0] lora_result = lora_result * scaling result = result + lora_result result = result.to(previous_dtype) return result, bias
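# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): the class docstring above says
# that for a RowParallelLinear base layer, lora_A is split along its input (row)
# dimension while lora_B stays a full Linear. This standalone check, with made-up
# sizes and a simulated 2-way split, shows why that preserves the LoRA result: the
# partial products that the all-reduce would sum equal the unsharded computation.
import torch

batch, in_features, r, out_features, world_size = 2, 8, 4, 6, 2
x = torch.randn(batch, in_features)
A = torch.randn(r, in_features)
B = torch.randn(out_features, r)
full = x @ A.T @ B.T

x_shards = x.chunk(world_size, dim=1)   # each rank sees its slice of the input
A_shards = A.chunk(world_size, dim=1)   # lora_A split along the input dimension
partial_sum = sum(xs @ As.T for xs, As in zip(x_shards, A_shards))  # what the all-reduce yields
assert torch.allclose(partial_sum @ B.T, full, atol=1e-5)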
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .config import LoftQConfig, LoraConfig from .gptq import QuantLinear from .layer import Conv2d, Embedding, Linear, LoraLayer from .model import LoraModel __all__ = ["LoraConfig", "LoftQConfig", "Conv2d", "Embedding", "LoraLayer", "Linear", "LoraModel", "QuantLinear"] def __getattr__(name): if (name == "Linear8bitLt") and is_bnb_available(): from .bnb import Linear8bitLt return Linear8bitLt if (name == "Linear4bit") and is_bnb_4bit_available(): from .bnb import Linear4bit return Linear4bit raise AttributeError(f"module {__name__} has no attribute {name}")
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import importlib import math import operator import re import warnings from dataclasses import asdict, replace from enum import Enum from functools import reduce from itertools import chain from typing import List, Optional import torch from tqdm import tqdm from transformers.pytorch_utils import Conv1D from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import ( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _freeze_adapter, _get_submodules, get_auto_gptq_quant_linear, get_quantization_config, ) from .config import LoraConfig from .gptq import QuantLinear from .layer import Conv2d, Embedding, Linear, LoraLayer class LoraModel(BaseTuner): """ Creates Low Rank Adapter (LoRA) model from a pretrained transformers model. The method is described in detail in https://arxiv.org/abs/2106.09685. Args: model ([`torch.nn.Module`]): The model to be adapted. config ([`LoraConfig`]): The configuration of the Lora model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The Lora model. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import LoraModel, LoraConfig >>> config = LoraConfig( ... task_type="SEQ_2_SEQ_LM", ... r=8, ... lora_alpha=32, ... target_modules=["q", "v"], ... lora_dropout=0.01, ... ) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> lora_model = LoraModel(model, config, "default") ``` ```py >>> import transformers >>> from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_int8_training >>> target_modules = ["q_proj", "k_proj", "v_proj", "out_proj", "fc_in", "fc_out", "wte"] >>> config = LoraConfig( ... r=4, lora_alpha=16, target_modules=target_modules, lora_dropout=0.1, bias="none", task_type="CAUSAL_LM" ... ) >>> model = transformers.GPTJForCausalLM.from_pretrained( ... "kakaobrain/kogpt", ... revision="KoGPT6B-ryan1.5b-float16", # or float32 version: revision=KoGPT6B-ryan1.5b ... pad_token_id=tokenizer.eos_token_id, ... use_cache=False, ... device_map={"": rank}, ... torch_dtype=torch.float16, ... load_in_8bit=True, ... ) >>> model = prepare_model_for_int8_training(model) >>> lora_model = get_peft_model(model, config) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`LoraConfig`]): The configuration of the Lora model. """ prefix: str = "lora_" def __init__(self, model, config, adapter_name) -> None: super().__init__(model, config, adapter_name) def _check_new_adapter_config(self, config: LoraConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. 
""" # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." ) @staticmethod def _check_target_module_exists(lora_config, key): return check_target_module_exists(lora_config, key) def _create_and_replace( self, lora_config, adapter_name, target, target_name, parent, current_key, **optional_kwargs, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(chain(lora_config.rank_pattern.keys(), lora_config.alpha_pattern.keys())) target_name_key = next(filter(lambda key: re.match(f".*\.{key}$", current_key), pattern_keys), current_key) r = lora_config.rank_pattern.get(target_name_key, lora_config.r) alpha = lora_config.alpha_pattern.get(target_name_key, lora_config.lora_alpha) bias = hasattr(target, "bias") and target.bias is not None kwargs = { "r": r, "lora_alpha": alpha, "lora_dropout": lora_config.lora_dropout, "fan_in_fan_out": lora_config.fan_in_fan_out, "init_lora_weights": lora_config.init_lora_weights, } kwargs["loaded_in_8bit"] = optional_kwargs.pop("loaded_in_8bit", False) kwargs["loaded_in_4bit"] = optional_kwargs.pop("loaded_in_4bit", False) kwargs["bias"] = bias quantization_config = get_quantization_config(self.model, method="gptq") if quantization_config is not None: kwargs["gptq_quantization_config"] = quantization_config # TODO: better deal with that if isinstance(target, Conv2d): target.update_layer_conv2d( adapter_name, r, alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) elif isinstance(target, Embedding): target.update_layer_embedding( adapter_name, r, alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) elif isinstance(target, Linear): target.update_layer( adapter_name, r, alpha, lora_config.lora_dropout, lora_config.init_lora_weights, ) else: new_module = self._create_new_module(lora_config, adapter_name, target, **kwargs) if adapter_name != self.active_adapter: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) def _replace_module(self, parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) # dispatch to correct device for name, module in new_module.named_modules(): if (self.prefix in name) or ("ranknum" in name): weight = child.qweight if hasattr(child, "qweight") else child.weight module.to(weight.device) def _mark_only_adapters_as_trainable(self) -> None: for n, p in self.model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = self.peft_config[active_adapter].bias if 
bias == "none": continue if bias == "all": for n, p in self.model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "lora_only": for m in self.model.modules(): if isinstance(m, LoraLayer) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(lora_config, adapter_name, target, **kwargs): # avoid eager bnb import if is_bnb_available(): import bitsandbytes as bnb from .bnb import Linear8bitLt if is_bnb_4bit_available(): from .bnb import Linear4bit gptq_quantization_config = kwargs.get("gptq_quantization_config", None) AutoGPTQQuantLinear = get_auto_gptq_quant_linear(gptq_quantization_config) loaded_in_8bit = kwargs.pop("loaded_in_8bit", False) loaded_in_4bit = kwargs.pop("loaded_in_4bit", False) if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target megatron_core = None if lora_config.megatron_config: megatron_core = importlib.import_module(lora_config.megatron_core) if loaded_in_8bit and isinstance(target_base_layer, bnb.nn.Linear8bitLt): eightbit_kwargs = kwargs.copy() eightbit_kwargs.update( { "has_fp16_weights": target.state.has_fp16_weights, "memory_efficient_backward": target.state.memory_efficient_backward, "threshold": target.state.threshold, "index": target.index, } ) new_module = Linear8bitLt(target, adapter_name, **eightbit_kwargs) elif loaded_in_4bit and is_bnb_4bit_available() and isinstance(target_base_layer, bnb.nn.Linear4bit): fourbit_kwargs = kwargs.copy() fourbit_kwargs.update( { "compute_dtype": target.compute_dtype, "compress_statistics": target.weight.compress_statistics, "quant_type": target.weight.quant_type, } ) new_module = Linear4bit(target, adapter_name, **fourbit_kwargs) elif AutoGPTQQuantLinear is not None and isinstance(target_base_layer, AutoGPTQQuantLinear): new_module = QuantLinear(target, adapter_name, **kwargs) target.weight = target.qweight elif isinstance(target_base_layer, torch.nn.Embedding): embedding_kwargs = kwargs.copy() embedding_kwargs.pop("fan_in_fan_out", None) embedding_kwargs.update(lora_config.loftq_config) new_module = Embedding(target, adapter_name, **embedding_kwargs) elif isinstance(target_base_layer, torch.nn.Conv2d): kwargs.update(lora_config.loftq_config) new_module = Conv2d(target, adapter_name, **kwargs) elif isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, **kwargs) elif megatron_core and isinstance( target_base_layer, (megatron_core.tensor_parallel.ColumnParallelLinear, megatron_core.tensor_parallel.RowParallelLinear), ): from .tp_layer import LoraParallelLinear megatron_kwargs = kwargs.copy() megatron_config = lora_config.megatron_config if isinstance(megatron_config, dict): transformer_config_class = megatron_core.transformer.transformer_config.TransformerConfig megatron_config = transformer_config_class(**lora_config.megatron_config) megatron_kwargs["megatron_config"] = megatron_config if megatron_kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `ColumnParallelLinear` " "or `RowParallelLinear`. " "Setting fan_in_fan_out to False." 
) megatron_kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = False new_module = LoraParallelLinear( base_layer=target, adapter_name=adapter_name, backend=megatron_core.tensor_parallel, **megatron_kwargs ) elif isinstance(target_base_layer, Conv1D): if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. " "Setting fan_in_fan_out to True." ) kwargs["fan_in_fan_out"] = lora_config.fan_in_fan_out = True kwargs.update(lora_config.loftq_config) new_module = Linear(target, adapter_name, is_target_conv_1d_layer=True, **kwargs) else: raise ValueError( f"Target module {target} is not supported. Currently, only the following modules are supported: " "`torch.nn.Linear`, `torch.nn.Embedding`, `torch.nn.Conv2d`, `transformers.pytorch_utils.Conv1D`." ) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config_dict def _set_adapter_layers(self, enabled: bool = True) -> None: for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. """ for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the base model would without adaptation." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, LoraLayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None, ): if merge: if getattr(self.model, "quantization_method", None) == "gptq": raise ValueError("Cannot merge LORA layers when the model is gptq quantized") self._unloading_checks(adapter_names) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def add_weighted_adapter( self, adapters, weights, adapter_name, combination_type="svd", svd_rank=None, svd_clamp=None, svd_full_matrices=True, svd_driver=None, ) -> None: """ This method adds a new adapter by merging the given adapters with the given weights. When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM errors. Args: adapters (`list`): List of adapter names to be merged. weights (`list`): List of weights for each adapter. adapter_name (`str`): Name of the new adapter. combination_type (`str`): Type of merging. Can be one of [`svd`, `linear`, `cat`]. When using the `cat` combination_type you should be aware that rank of the resulting adapter will be equal to the sum of all adapters ranks. So it's possible that the mixed adapter may become too big and result in OOM errors. svd_rank (`int`, *optional*): Rank of output adapter for svd. If None provided, will use max rank of merging adapters. svd_clamp (`float`, *optional*): A quantile threshold for clamping SVD decomposition output. If None is provided, do not perform clamping. Defaults to None. svd_full_matrices (`bool`, *optional*): Controls whether to compute the full or reduced SVD, and consequently, the shape of the returned tensors U and Vh. Defaults to True. svd_driver (`str`, *optional*): Name of the cuSOLVER method to be used. This keyword argument only works when merging on CUDA. Can be one of [None, `gesvd`, `gesvdj`, `gesvda`]. For more info please refer to `torch.linalg.svd` documentation. Defaults to None. 
""" if adapter_name in list(self.peft_config.keys()): return for adapter in adapters: if adapter not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter} does not exist") # if there is only one adapter, we can only use linear merging combination_type = "linear" if len(adapters) == 1 else combination_type adapters_ranks = [self.peft_config[adapter].r for adapter in adapters] if combination_type == "linear": # all adapters ranks should be same, new rank is just this value if len(set(adapters_ranks)) != 1: raise ValueError("All adapters must have the same r value when using `linear` combination_type") new_rank = adapters_ranks[0] elif combination_type == "cat": # adapters ranks may be different, new rank is sum of all ranks # be careful, because output adapter rank may be really big if mixing a lot of adapters new_rank = sum(adapters_ranks) elif combination_type == "svd": # new rank is the max of all ranks of the adapters if not provided new_rank = svd_rank or max(adapters_ranks) else: raise ValueError(f"Invalid combination_type: {combination_type}") target_module_types = [type(self.peft_config[adapter].target_modules) for adapter in adapters] if not target_module_types: raise ValueError(f"Found no adapter matching the names in {adapters}") if len(set(target_module_types)) > 1: raise ValueError( "all adapter configs should follow the same target modules type. " "Combining adapters with `target_modules` type being a mix of list/set and string is not supported." ) if target_module_types[0] == str: new_target_modules = "|".join(f"({self.peft_config[adapter].target_modules})" for adapter in adapters) elif target_module_types[0] == set: new_target_modules = reduce( operator.or_, (self.peft_config[adapter].target_modules for adapter in adapters) ) else: raise TypeError(f"Invalid type {target_module_types[0]} found in target_modules") self.peft_config[adapter_name] = replace( self.peft_config[adapters[0]], r=new_rank, lora_alpha=new_rank, target_modules=new_target_modules, ) self.inject_adapter(self.model, adapter_name) # Do we really need that? 
_freeze_adapter(self.model, adapter_name) key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LoraLayer): if adapter_name in target.lora_A: target_lora_A = target.lora_A[adapter_name].weight target_lora_B = target.lora_B[adapter_name].weight elif adapter_name in target.lora_embedding_A: target_lora_A = target.lora_embedding_A[adapter_name] target_lora_B = target.lora_embedding_B[adapter_name] else: continue target_lora_A.data = target_lora_A.data * 0.0 target_lora_B.data = target_lora_B.data * 0.0 if combination_type == "linear": for adapter, weight in zip(adapters, weights): if adapter in target.lora_A: current_adapter_lora_A = target.lora_A[adapter].weight current_adapter_lora_B = target.lora_B[adapter].weight elif adapter in target.lora_embedding_A: current_adapter_lora_A = target.lora_embedding_A[adapter] current_adapter_lora_B = target.lora_embedding_B[adapter] else: continue target_lora_A.data += current_adapter_lora_A.data * math.sqrt(weight) * target.scaling[adapter] target_lora_B.data += current_adapter_lora_B.data * math.sqrt(weight) elif combination_type == "cat": loras_A, loras_B = [], [] for adapter, weight in zip(adapters, weights): if adapter in target.lora_A: current_adapter_lora_A = target.lora_A[adapter].weight current_adapter_lora_B = target.lora_B[adapter].weight elif adapter in target.lora_embedding_A: current_adapter_lora_A = target.lora_embedding_A[adapter] current_adapter_lora_B = target.lora_embedding_B[adapter] else: continue loras_A.append(current_adapter_lora_A.data * weight * target.scaling[adapter]) loras_B.append(current_adapter_lora_B.data) if len(loras_A) == 0: raise ValueError("No matching LoRAs found. Please raise an issue on Github.") loras_A = torch.cat(loras_A, dim=0) loras_B = torch.cat(loras_B, dim=1) target_lora_A.data[: loras_A.shape[0], :] = loras_A target_lora_B.data[:, : loras_B.shape[1]] = loras_B elif combination_type == "svd": target_lora_A.data, target_lora_B.data = self._svd_weighted_adapter( adapters, weights, new_rank, target, target_lora_A, target_lora_B, svd_clamp, full_matrices=svd_full_matrices, driver=svd_driver, ) def _svd_weighted_adapter( self, adapters, weights, new_rank, target, target_lora_A, target_lora_B, clamp=None, full_matrices=True, driver=None, ): valid_adapters = [] valid_weights = [] for adapter, weight in zip(adapters, weights): if adapter in target.lora_A or adapter in target.lora_embedding_A: valid_adapters.append(adapter) valid_weights.append(weight) # if no valid adapter, nothing to do if len(valid_adapters) == 0: raise ValueError("No matching LoRAs found. 
Please raise an issue on Github.") delta_weight = valid_weights[0] * target.get_delta_weight(valid_adapters[0]) for adapter, weight in zip(valid_adapters[1:], valid_weights[1:]): delta_weight += weight * target.get_delta_weight(adapter) conv2d = isinstance(target, Conv2d) if conv2d: conv2d_1x1 = target.weight.size()[2:4] == (1, 1) if not conv2d_1x1: delta_weight = delta_weight.flatten(start_dim=1) else: delta_weight = delta_weight.squeeze() if hasattr(target, "fan_in_fan_out") and target.fan_in_fan_out: delta_weight = delta_weight.T # based on https://github.com/kohya-ss/sd-scripts/blob/main/networks/svd_merge_lora.py#L114-L131 U, S, Vh = torch.linalg.svd(delta_weight, full_matrices=full_matrices, driver=driver) U = U[:, :new_rank] S = S[:new_rank] U = U @ torch.diag(S) Vh = Vh[:new_rank, :] if clamp is not None: dist = torch.cat([U.flatten(), Vh.flatten()]) hi_val = torch.quantile(dist, clamp) low_val = -hi_val U = U.clamp(low_val, hi_val) Vh = Vh.clamp(low_val, hi_val) if conv2d: U = U.reshape(target_lora_B.data.shape) Vh = Vh.reshape(target_lora_A.data.shape) return Vh, U def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, LoraLayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapters[:] self.active_adapter = new_adapter or [] def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[List[str]] = None ) -> torch.nn.Module: r""" This method merges the LoRa layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self) -> torch.nn.Module: """ Gets back the base model by removing all the lora modules without merging. This gives back the original base model. """ return self._unload_and_optionally_merge(merge=False)
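# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): a hedged, self-contained sketch of
# add_weighted_adapter on a toy model. The module, adapter names and weights are all
# hypothetical; the point is only to show the call sequence (two adapters are injected,
# combined into a new adapter via SVD, and the result is activated).
import torch.nn as nn
from peft import LoraConfig, get_peft_model


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(16, 16)

    def forward(self, x):
        return self.linear(x)


config = LoraConfig(r=4, lora_alpha=8, target_modules=["linear"])
peft_model = get_peft_model(TinyModel(), config, adapter_name="adapter_a")
peft_model.add_adapter("adapter_b", config)

peft_model.add_weighted_adapter(
    adapters=["adapter_a", "adapter_b"],
    weights=[0.7, 0.3],
    adapter_name="blend",
    combination_type="svd",  # "linear" requires equal ranks, "cat" sums the ranks
)
peft_model.set_adapter("blend")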
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/gptq.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from peft.tuners.lora.layer import LoraLayer class QuantLinear(torch.nn.Module, LoraLayer): def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ): super().__init__() LoraLayer.__init__(self, base_layer) # self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter # for backwards compatibility self.quant_linear_module = base_layer self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def forward(self, x: torch.Tensor): # note: logic differs from default Linear because merging is not supported result = self.quant_linear_module(x) if self.disable_adapters: return result for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result += output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep # TODO: Check if it is better as suggested by users https://github.com/PanQiWei/AutoGPTQ/pull/102 # def reset_lora_parameters(self, adapter_name): # if adapter_name in self.lora_A.keys(): # torch.nn.init.xavier_uniform_(self.lora_A[adapter_name].weight) # torch.nn.init.zeros_(self.lora_B[adapter_name].weight)
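# ---------------------------------------------------------------------------
# Illustration only (not part of the original file): the forward above converts the
# activations to the LoRA weights' dtype when autocast is off, then casts the LoRA
# output back to the dtype of the quantized base output. A standalone sketch of that
# guard with plain float16/float32 tensors (sizes are made up):
import torch
import torch.nn as nn

x = torch.randn(2, 16, dtype=torch.float16)
result = torch.zeros(2, 32, dtype=torch.float16)  # stand-in for the quantized base output
lora_A, lora_B = nn.Linear(16, 4, bias=False), nn.Linear(4, 32, bias=False)  # kept in float32
scaling = 2.0

requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
    expected_dtype = result.dtype
    x = x.to(lora_A.weight.dtype)       # compute the LoRA path in float32
output = lora_B(lora_A(x)) * scaling
if requires_conversion:
    output = output.to(expected_dtype)  # cast back before adding to the base output
result = result + output
assert result.dtype == torch.float16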
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from typing import Any, List, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTunerLayer from peft.utils.other import transpose class LoraLayer(BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B") # All names of other parameters that may contain adapter-related parameters other_param_names = ("r", "lora_alpha", "scaling", "lora_dropout") def __init__(self, base_layer: nn.Module, **kwargs) -> None: self.base_layer = base_layer self.r = {} self.lora_alpha = {} self.scaling = {} self.lora_dropout = nn.ModuleDict({}) self.lora_A = nn.ModuleDict({}) self.lora_B = nn.ModuleDict({}) # For Embedding layer self.lora_embedding_A = nn.ParameterDict({}) self.lora_embedding_B = nn.ParameterDict({}) # Mark the weight as unmerged self._disable_adapters = False self.merged_adapters = [] self.kwargs = kwargs base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features elif isinstance(base_layer, nn.Conv2d): in_features, out_features = base_layer.in_channels, base_layer.out_channels elif isinstance(base_layer, nn.Embedding): in_features, out_features = base_layer.num_embeddings, base_layer.embedding_dim elif isinstance(base_layer, Conv1D): in_features, out_features = ( base_layer.weight.ds_shape if hasattr(base_layer.weight, "ds_shape") else base_layer.weight.shape ) elif hasattr(base_layer, "infeatures") and hasattr(base_layer, "outfeatures"): # QuantLinear in_features, out_features = base_layer.infeatures, base_layer.outfeatures elif hasattr(base_layer, "input_size") and hasattr(base_layer, "output_size"): # Megatron ColumnParallelLinear,RowParallelLinear in_features, out_features = base_layer.input_size, base_layer.output_size else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def update_layer(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout.update(nn.ModuleDict({adapter_name: lora_dropout_layer})) # Actual trainable parameters if r > 0: self.lora_A[adapter_name] = nn.Linear(self.in_features, r, bias=False) self.lora_B[adapter_name] = nn.Linear(r, self.out_features, bias=False) self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) 
weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def update_layer_conv2d(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters base_layer = self.get_base_layer() if r > 0: kernel_size = base_layer.kernel_size stride = base_layer.stride padding = base_layer.padding self.lora_A[adapter_name] = nn.Conv2d(self.in_features, r, kernel_size, stride, padding, bias=False) self.lora_B[adapter_name] = nn.Conv2d(r, self.out_features, (1, 1), (1, 1), bias=False) self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) weight = getattr(base_layer, "weight", None) if weight is not None: # the layer is already completely initialized, this is an update self.to(base_layer.weight.device, dtype=weight.dtype) self.set_adapter(self.active_adapters) def update_layer_embedding(self, adapter_name, r, lora_alpha, lora_dropout, init_lora_weights): if r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {r}") self.r[adapter_name] = r self.lora_alpha[adapter_name] = lora_alpha if lora_dropout > 0.0: lora_dropout_layer = nn.Dropout(p=lora_dropout) else: lora_dropout_layer = nn.Identity() self.lora_dropout[adapter_name] = lora_dropout_layer # Actual trainable parameters if r > 0: weight_A = torch.randn((r, self.in_features)) weight_B = torch.randn((self.out_features, r)) self.lora_embedding_A[adapter_name] = nn.Parameter(weight_A) self.lora_embedding_B[adapter_name] = nn.Parameter(weight_B) self.scaling[adapter_name] = lora_alpha / r if init_lora_weights == "loftq": self.loftq_init(adapter_name) elif init_lora_weights: self.reset_lora_parameters(adapter_name, init_lora_weights) base_layer = self.get_base_layer() weight = getattr(base_layer, "weight", None) if weight is not None: # the layer is already completely initialized, this is an update self.to(base_layer.weight.device, dtype=weight.dtype) self.set_adapter(self.active_adapters) def reset_lora_parameters(self, adapter_name, init_lora_weights): if init_lora_weights is False: return if adapter_name in self.lora_A.keys(): if init_lora_weights is True: # initialize A the same way as the default for nn.Linear and B to zero # https://github.com/microsoft/LoRA/blob/a0a92e0f26c067cf94747bdbf1ce73793fa44d19/loralib/layers.py#L124 nn.init.kaiming_uniform_(self.lora_A[adapter_name].weight, a=math.sqrt(5)) elif init_lora_weights.lower() == "gaussian": nn.init.normal_(self.lora_A[adapter_name].weight, std=1 / self.r[adapter_name]) else: raise ValueError(f"Unknown initialization {init_lora_weights=}") nn.init.zeros_(self.lora_B[adapter_name].weight) if adapter_name in self.lora_embedding_A.keys(): # initialize a the same way as the default for nn.linear and b to zero nn.init.zeros_(self.lora_embedding_A[adapter_name]) nn.init.normal_(self.lora_embedding_B[adapter_name]) def 
loftq_init(self, adapter_name): from peft.utils.loftq_utils import loftq_init weight = self.get_base_layer().weight kwargs = { "num_bits": self.kwargs.get("loftq_bits", 4), "reduced_rank": self.r[adapter_name], "num_iter": self.kwargs.get("loftq_iter", 1), } qweight, lora_A, lora_B = loftq_init(weight, **kwargs) if adapter_name in self.lora_A.keys(): # initialize A the same way as the default for nn.Linear and B to zero self.lora_A[adapter_name].weight.data = lora_A self.lora_B[adapter_name].weight.data = lora_B if adapter_name in self.lora_embedding_A.keys(): # initialize a the same way as the default for nn.linear and b to zero self.lora_embedding_A[adapter_name].weight.data = lora_A self.lora_embedding_B[adapter_name].weight.data = lora_B self.get_base_layer().weight.data = qweight def set_scale(self, adapter, scale): if adapter not in self.scaling: # Ignore the case where the adapter is not in the layer return self.scaling[adapter] = scale * self.lora_alpha[adapter] / self.r[adapter] def scale_layer(self, scale: float) -> None: if scale == 1: return for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue self.scaling[active_adapter] *= scale def unscale_layer(self, scale=None) -> None: for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue if scale is None: self.scaling[active_adapter] = self.lora_alpha[active_adapter] / self.r[active_adapter] else: self.scaling[active_adapter] /= scale # Below code is based on https://github.com/microsoft/LoRA/blob/main/loralib/layers.py # and modified to work with PyTorch FSDP # ------------------------------------------------------------------------------------------ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License (MIT). See LICENSE in the repo root for license information. # ------------------------------------------------------------------------------------------ class Linear(nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, fan_in_fan_out: bool = False, # Set this to True if the layer to replace stores weight like (fan_in, fan_out) is_target_conv_1d_layer: bool = False, init_lora_weights: Union[bool, str] = True, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer, **kwargs) self.fan_in_fan_out = fan_in_fan_out self._active_adapter = adapter_name self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) self.is_target_conv_1d_layer = is_target_conv_1d_layer def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." 
) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.clone() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_B[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, self.fan_in_fan_out) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) result += lora_B(lora_A(dropout(x))) * scaling result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." 
+ rep class Embedding(nn.Module, LoraLayer): # LoRA implemented in a Embedding layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer_embedding(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self.lora_embedding_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.copy() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_embedding_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_embedding_B[adapter].device dtype = self.lora_embedding_A[adapter].dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. 
cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_embedding_A[adapter] weight_B = self.lora_embedding_B[adapter] if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() output_tensor = transpose(weight_B @ weight_A, True) * self.scaling[adapter] if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_embedding_A[adapter] = weight_A.to(dtype) self.lora_embedding_B[adapter] = weight_B.to(dtype) return output_tensor def _embed(self, input: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: base_layer = self.get_base_layer() return F.embedding( input, weight, padding_idx=base_layer.padding_idx, max_norm=base_layer.max_norm, norm_type=base_layer.norm_type, scale_grad_by_freq=base_layer.scale_grad_by_freq, sparse=base_layer.sparse, ) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # TODO: no dtype conversion here, unlike in Linear, is that correct? if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_embedding_A: continue embedding_A = self.lora_embedding_A[active_adapter].T embedding_B = self.lora_embedding_B[active_adapter].T scaling = self.scaling[active_adapter] after_A = self._embed(x, embedding_A) result += (after_A @ embedding_B) * scaling return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep class Conv2d(nn.Module, LoraLayer): # Lora implemented in a conv2d layer def __init__( self, base_layer: nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: Union[bool, str] = True, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self._active_adapter = adapter_name self.update_layer_conv2d(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights inside the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self.lora_A.keys(): base_layer = self.get_base_layer() if safe_merge: # Note that safe_merge will be slower than the normal merge # because of the copy operation. orig_weights = base_layer.weight.data.copy() orig_weights += self.get_delta_weight(active_adapter) if not torch.isfinite(orig_weights).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = orig_weights else: base_layer.weight.data += self.get_delta_weight(active_adapter) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self.lora_A.keys(): self.get_base_layer().weight.data -= self.get_delta_weight(active_adapter) def get_delta_weight(self, adapter) -> torch.Tensor: """ Compute the delta weight for the given adapter. Args: adapter (str): The name of the adapter for which the delta weight should be computed. """ device = self.lora_B[adapter].weight.device dtype = self.lora_A[adapter].weight.dtype # In case users wants to merge the adapter weights that are in # float16 while being on CPU, we need to cast the weights to float32, perform the merge and then cast back to # float16 because the `@` and matmul operation in general is not supported in torch + cpu + fp16. cast_to_fp32 = device.type == "cpu" and dtype == torch.float16 weight_A = self.lora_A[adapter].weight weight_B = self.lora_B[adapter].weight if cast_to_fp32: weight_A = weight_A.float() weight_B = weight_B.float() # https://github.com/bmaltais/kohya_ss/blob/feb6728762a8f463d15ba936d189d4c3abfaa1ab/networks/lora.py#L117 if self.get_base_layer().weight.size()[2:4] == (1, 1): # conv2d 1x1 output_tensor = (weight_B.squeeze(3).squeeze(2) @ weight_A.squeeze(3).squeeze(2)).unsqueeze(2).unsqueeze( 3 ) * self.scaling[adapter] else: # conv2d 3x3 output_tensor = ( F.conv2d( weight_A.permute(1, 0, 2, 3), weight_B, ).permute(1, 0, 2, 3) * self.scaling[adapter] ) if cast_to_fp32: output_tensor = output_tensor.to(dtype=dtype) # cast back the weights self.lora_A[adapter].weight.data = weight_A.to(dtype) self.lora_B[adapter].weight.data = weight_B.to(dtype) return output_tensor def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] x = x.to(lora_A.weight.dtype) result += lora_B(lora_A(dropout(x))) * scaling result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep
0
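As a rough illustration of the arithmetic implemented by `Linear.get_delta_weight` and `merge` above: LoRA adds a low-rank update `delta_w = (B @ A) * (lora_alpha / r)` to the base weight, so folding the delta into the base matrix must give the same output as running the base layer and the adapter separately. The standalone sketch below is not part of the repository; all sizes and names are illustrative.

```python
import math
import torch

torch.manual_seed(0)

in_features, out_features, r, lora_alpha = 16, 32, 4, 8
scaling = lora_alpha / r

base = torch.nn.Linear(in_features, out_features, bias=False)
lora_A = torch.nn.Linear(in_features, r, bias=False)
lora_B = torch.nn.Linear(r, out_features, bias=False)
torch.nn.init.kaiming_uniform_(lora_A.weight, a=math.sqrt(5))
torch.nn.init.normal_(lora_B.weight)  # B is normally zero-initialized; made non-zero here so the delta is visible

x = torch.randn(2, in_features)

# Unmerged path, as in Linear.forward: base output plus the scaled low-rank update.
unmerged = base(x) + lora_B(lora_A(x)) * scaling

# Merged path, as in merge()/get_delta_weight(): fold delta_w into the base weight once.
delta_w = (lora_B.weight @ lora_A.weight) * scaling
merged = x @ (base.weight + delta_w).T

print(torch.allclose(unmerged, merged, atol=1e-5))  # True
```

This equivalence is also why `unmerge` can simply subtract `get_delta_weight(...)` from the base weight to restore the original behaviour.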
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/lora/bnb.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from typing import List, Optional import bitsandbytes as bnb import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from peft.utils.other import transpose from .layer import LoraLayer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Merge lora module to 8-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB # Dequantize the result of identity matrix and int8 weight because bitsandbytes does not support int8 # dequantization directly im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device) im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im) im, Sim = bnb.functional.transform(im, "col32") if state.CxB is None: state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB) out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB) output = bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t() w_data = output.to(lora_data.dtype).to(lora_data.device) + lora_data if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. 
The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Unmerge lora module to 8-bit linear may get different generations due to rounding errors." ) lora_data = self.get_delta_weight(active_adapter) weight = self.get_base_layer().weight state = self.get_base_layer().state if state.SCB is None: state.SCB = weight.SCB im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device) im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im) im, Sim = bnb.functional.transform(im, "col32") if state.CxB is None: state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB) out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB) output = bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t() w_data = output.to(lora_data.dtype).to(lora_data.device) - lora_data self.get_base_layer().weight = bnb.nn.Int8Params( w_data.to("cpu"), requires_grad=False, has_fp16_weights=weight.has_fp16_weights ).to(weight.device) state.reset_grads() def get_delta_weight(self, adapter): return ( transpose( self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False, ) * self.scaling[adapter] ) def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype compute_dtype = lora_A.weight.dtype if x.dtype != compute_dtype: x = x.to(compute_dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result += output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, LoraLayer): # Lora implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, r: int = 0, lora_alpha: int = 1, lora_dropout: float = 0.0, init_lora_weights: bool = True, **kwargs, ) -> None: super().__init__() LoraLayer.__init__(self, base_layer) self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights) def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. 
adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Merge lora module to 4-bit linear may get different generations due to rounding errors." ) # Refer to https://gist.github.com/ChrisHayduk/1a53463331f52dca205e55982baf9930 weight = self.get_base_layer().weight kwargs = weight.__dict__ lora_data = self.get_delta_weight(active_adapter) w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) + lora_data if safe_merge and not torch.isfinite(w_data).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( weight.device ) self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter not in self.lora_A.keys(): continue warnings.warn( "Unmerge lora module to 4-bit linear may get different generations due to rounding errors." ) weight = self.get_base_layer().weight kwargs = weight.__dict__ lora_data = self.get_delta_weight(active_adapter) w_data = bnb.functional.dequantize_4bit(weight.data, weight.quant_state) - lora_data self.get_base_layer().weight = bnb.nn.Params4bit(w_data.to("cpu"), requires_grad=False, **kwargs).to( weight.device ) def get_delta_weight(self, adapter): return ( transpose( self.lora_B[adapter].weight @ self.lora_A[adapter].weight, False, ) * self.scaling[adapter] ) def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) # As per Tim Dettmers, for 4bit, we need to defensively clone here. # The reason is that in some cases, an error can occur that backprop # does not work on a manipulated view. This issue may be solved with # newer PyTorch versions but this would need extensive testing to be # sure. result = result.clone() for active_adapter in self.active_adapters: if active_adapter not in self.lora_A.keys(): continue lora_A = self.lora_A[active_adapter] lora_B = self.lora_B[active_adapter] dropout = self.lora_dropout[active_adapter] scaling = self.scaling[active_adapter] requires_conversion = not torch.is_autocast_enabled() if requires_conversion: expected_dtype = result.dtype x = x.to(lora_A.weight.dtype) output = lora_B(lora_A(dropout(x))) if requires_conversion: output = output.to(expected_dtype) output = output * scaling result += output return result def __repr__(self) -> str: rep = super().__repr__() return "lora." + rep
0
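The 8-bit and 4-bit variants above keep `lora_A`/`lora_B` in a floating-point dtype while the base layer holds quantized parameters, so their `forward` casts the activations to the adapter dtype and casts the adapter output back to the dtype of the base result whenever autocast is disabled. Below is a minimal sketch of that conversion pattern using only plain PyTorch (no `bitsandbytes`); the shapes and scaling value are illustrative.

```python
import torch

def add_lora_output(result, x, lora_A, lora_B, scaling):
    # Mirrors the dtype handling in Linear8bitLt.forward / Linear4bit.forward.
    requires_conversion = not torch.is_autocast_enabled()
    if requires_conversion:
        expected_dtype = result.dtype
        x = x.to(lora_A.weight.dtype)
    output = lora_B(lora_A(x))
    if requires_conversion:
        output = output.to(expected_dtype)
    return result + output * scaling

lora_A = torch.nn.Linear(8, 2, bias=False)
lora_B = torch.nn.Linear(2, 8, bias=False)
x = torch.randn(4, 8)
result = torch.randn(4, 8, dtype=torch.float16)  # stand-in for the quantized base layer's output
print(add_lora_output(result, x, lora_A, lora_B, scaling=0.5).dtype)  # torch.float16
```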
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/oft/config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import List, Optional, Union from peft.tuners.lycoris_utils import LycorisConfig from peft.utils import PeftType @dataclass class OFTConfig(LycorisConfig): """ This is the configuration class to store the configuration of a [`OFTModel`]. Args: r (`int`): OFT rank. module_dropout (`int`): The dropout probability for disabling OFT modules during training. target_modules (`Union[List[str],str]`): The names of the modules to apply OFT to. init_weights (`bool`): Whether to perform initialization of OFT weights. layers_to_transform (`Union[List[int],int]`): The layer indexes to transform, if this argument is specified, it will apply the OFT transformations on the layer indexes that are specified in this list. If a single integer is passed, it will apply the OFT transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None` and if the layer pattern is not in the common layers pattern. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. modules_to_save (`List[str]`): The names of modules to be set as trainable except OFT parameters. coft (`bool`): Whether to use the constrainted variant of OFT or not. eps (`float`): The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True. block_share (`bool`): Whether to share the OFT parameters between blocks or not. """ r: int = field(default=8, metadata={"help": "OFT rank"}) module_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for disabling OFT modules during training"} ) target_modules: Optional[Union[List[str], str]] = field( default=None, metadata={ "help": "List of module names or regex expression of the module names to replace with OFT." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' " }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the OFT layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) layers_to_transform: Optional[Union[List[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index." }, ) layers_pattern: Optional[str] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." 
}, ) modules_to_save: Optional[List[str]] = field( default=None, metadata={ "help": "List of modules apart from OFT layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) coft: bool = field( default=False, metadata={"help": "Whether to use the constrained variant of OFT or not."}, ) eps: float = field( default=6e-5, metadata={ "help": "The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True." }, ) block_share: bool = field( default=False, metadata={"help": "Whether to share the OFT parameters between blocks or not."}, ) def __post_init__(self): self.peft_type = PeftType.OFT self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules )
0
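Constructing this config only requires choosing `r` and the target module names; every other field shown above has a default. The snippet below is a hedged example: the module names are placeholders that depend on the model being adapted, and the import uses the `peft.tuners.oft` path defined by the package.

```python
from peft.tuners.oft import OFTConfig

config = OFTConfig(
    r=8,                                  # OFT rank (number of block-diagonal rotation blocks)
    target_modules=["q_proj", "v_proj"],  # placeholder names; use modules that exist in your model
    module_dropout=0.0,
    init_weights=True,
    coft=False,       # set True for the constrained (COFT) variant
    eps=6e-5,         # only takes effect when coft=True
    block_share=False,
)
print(config.peft_type)  # PeftType.OFT, set in __post_init__
```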
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/oft/__init__.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .config import OFTConfig from .layer import Conv2d, Linear, OFTLayer from .model import OFTModel __all__ = ["OFTConfig", "OFTModel", "Conv2d", "Linear", "OFTLayer"]
0
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/oft/model.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from typing import Dict, Type, Union import torch from torch import nn from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner from .layer import Conv2d, Linear, OFTLayer class OFTModel(LycorisTuner): """ Creates Orthogonal Finetuning model from a pretrained model. The method is described in https://arxiv.org/abs/2306.07280 Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`OFTConfig`]): The configuration of the OFT model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. Returns: `torch.nn.Module`: The OFT model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import OFTModel, OFTConfig >>> config_te = OFTConfig( ... r=8, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = OFTConfig( ... r=8, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... module_dropout=0.0, ... init_weights=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = OFTModel(model.text_encoder, config_te, "default") >>> model.unet = OFTModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`OFTConfig`]): The configuration of the OFT model. """ prefix: str = "oft_" layers_mapping: Dict[Type[torch.nn.Module], Type[OFTLayer]] = { torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear, } def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[OFTLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str, **optional_kwargs, ) -> None: """ A private method to create and replace the target module with the adapter module. """ # Regexp matching - Find key which matches current target_name in patterns provided pattern_keys = list(config.rank_pattern.keys()) target_name_key = next(filter(lambda key: re.match(f"(.*\.)?{key}$", current_key), pattern_keys), target_name) kwargs = config.to_dict() kwargs["r"] = config.rank_pattern.get(target_name_key, config.r) if isinstance(target, OFTLayer): target.update_layer(adapter_name, **kwargs) else: new_module = self._create_new_module(config, adapter_name, target, **kwargs) self._replace_module(parent, target_name, new_module, target)
0
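The only model-specific logic in `_create_and_replace` above is resolving a per-module rank: the fully qualified module name is matched against the keys of `config.rank_pattern`, and the default `r` is used when nothing matches. A small standalone sketch of that lookup follows; the module names and the pattern key are made up for illustration.

```python
import re

rank_pattern = {"ff.net.0.proj": 16}  # hypothetical override: these modules get r=16
default_r = 8

def resolve_rank(current_key: str) -> int:
    # Same matching rule as in _create_and_replace: the pattern key must match
    # the end of the fully qualified module name.
    matched = next(
        (key for key in rank_pattern if re.match(rf"(.*\.)?{key}$", current_key)),
        None,
    )
    return rank_pattern.get(matched, default_r)

print(resolve_rank("unet.down_blocks.0.attentions.0.ff.net.0.proj"))  # 16
print(resolve_rank("unet.down_blocks.0.attentions.0.to_q"))           # 8
```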
hf_public_repos/peft/src/peft/tuners
hf_public_repos/peft/src/peft/tuners/oft/layer.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import warnings from typing import Any, List, Optional, Set, Tuple import torch import torch.nn as nn from peft.tuners.lycoris_utils import LycorisLayer class OFTLayer(nn.Module, LycorisLayer): # All names of layers that may contain adapter weights adapter_layer_names = ("oft_r",) # other_param_names is defined on parent class def __init__(self, base_layer: nn.Module): super().__init__() LycorisLayer.__init__(self, base_layer) # OFT info self.oft_r = nn.ParameterDict({}) self.coft = {} self.eps = {} self.block_share = {} @property def _available_adapters(self) -> Set[str]: return {*self.oft_r} def create_adapter_parameters(self, adapter_name: str, r: int, shape: Tuple[int, ...], block_share: bool): if block_share: self.oft_r[adapter_name] = nn.Parameter(torch.empty(1, math.ceil(shape[0] / r), math.ceil(shape[0] / r))) else: self.oft_r[adapter_name] = nn.Parameter(torch.empty(r, math.ceil(shape[0] / r), math.ceil(shape[0] / r))) def reset_adapter_parameters(self, adapter_name: str): nn.init.zeros_(self.oft_r[adapter_name]) def reset_adapter_parameters_random(self, adapter_name: str): nn.init.kaiming_uniform_(self.oft_r[adapter_name], a=math.sqrt(5)) def update_layer( self, adapter_name: str, r: int, module_dropout: float, init_weights: bool, coft: bool = False, eps: float = 6e-5, block_share: bool = False, **kwargs, ) -> None: """Internal function to create oft adapter Args: adapter_name (`str`): Name for the adapter to add. r (`int`): Rank for the added adapter. module_dropout (`float`): The dropout probability for disabling adapter during training. init_weights (`bool`): Whether to initialize weights. coft (`bool`): Whether to use the constrainted variant of OFT or not. eps (`float`): The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True. block_share (`bool`): Whether to share the OFT parameters between blocks or not. 
""" self.r[adapter_name] = r self.module_dropout[adapter_name] = module_dropout self.coft[adapter_name] = coft self.block_share[adapter_name] = block_share # Determine shape of OFT weights base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): shape = tuple(base_layer.weight.shape) elif isinstance(base_layer, nn.Conv2d): shape = ( base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], ) else: raise TypeError(f"OFT is not implemented for base layers of type {type(base_layer).__name__}") self.eps[adapter_name] = eps * math.ceil(shape[0] / r) * math.ceil(shape[0] / r) # Create weights with provided shape self.create_adapter_parameters(adapter_name, r, shape, block_share) # Initialize weights if init_weights: self.reset_adapter_parameters(adapter_name) else: self.reset_adapter_parameters_random(adapter_name) # Move new weights to device weight = getattr(self.get_base_layer(), "weight", None) if weight is not None: # the layer is already completely initialized, this is an update if weight.dtype.is_floating_point or weight.dtype.is_complex: self.to(weight.device, dtype=weight.dtype) else: self.to(weight.device) self.set_adapter(self.active_adapters) def unscale_layer(self, scale=None) -> None: # scale is not used pass def merge(self, safe_merge: bool = False, adapter_names: Optional[List[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If `True`, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If `None`, all active adapters will be merged. Defaults to `None`. """ if self.merged: warnings.warn( f"Already following adapters were merged {','.join(self.merged_adapters)}. " f"You are now additionally merging {','.join(self.active_adapters)}." ) if adapter_names is None: adapter_names = self.active_adapters for active_adapter in adapter_names: if active_adapter in self._available_adapters: base_layer = self.get_base_layer() orig_weights = base_layer.weight.data if isinstance(base_layer, nn.Linear): orig_weights = torch.transpose(orig_weights, 0, 1) elif isinstance(base_layer, nn.Conv2d): orig_weights = orig_weights.view( [ base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], ] ) orig_weights = torch.transpose(orig_weights, 0, 1) delta_weight = self.get_delta_weight(active_adapter) if orig_weights.shape[1] != delta_weight.shape[1]: # when in channels is not divisible by r delta_weight = delta_weight[: orig_weights.shape[1], : orig_weights.shape[1]] new_weights = torch.mm(orig_weights, delta_weight) if isinstance(base_layer, nn.Linear): new_weights = torch.transpose(new_weights, 0, 1) elif isinstance(base_layer, nn.Conv2d): new_weights = torch.transpose(new_weights, 0, 1) new_weights = new_weights.view( [ base_layer.out_channels, base_layer.in_channels, base_layer.kernel_size[0], base_layer.kernel_size[1], ] ) if safe_merge and not torch.isfinite(new_weights).all(): raise ValueError( f"NaNs detected in the merged weights. The adapter {active_adapter} seems to be broken" ) base_layer.weight.data = new_weights self.merged_adapters.append(active_adapter) def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. 
""" if not self.merged: warnings.warn("Already unmerged. Nothing to do.") return while len(self.merged_adapters) > 0: active_adapter = self.merged_adapters.pop() if active_adapter in self._available_adapters: base_layer = self.get_base_layer() new_weights = base_layer.weight.data if isinstance(base_layer, nn.Linear): new_weights = torch.transpose(new_weights, 0, 1) elif isinstance(base_layer, nn.Conv2d): new_weights = new_weights.view( [ base_layer.out_channels, base_layer.in_channels * base_layer.kernel_size[0] * base_layer.kernel_size[1], ] ) new_weights = torch.transpose(new_weights, 0, 1) delta_weight = self.get_delta_weight(active_adapter) if new_weights.shape[1] != delta_weight.shape[1]: # when in channels is not divisible by r delta_weight = delta_weight[: new_weights.shape[1], : new_weights.shape[1]] delta_inv = torch.inverse(delta_weight) orig_weights = torch.mm(new_weights, delta_inv) if isinstance(base_layer, nn.Linear): orig_weights = torch.transpose(orig_weights, 0, 1) elif isinstance(base_layer, nn.Conv2d): orig_weights = torch.transpose(orig_weights, 0, 1) orig_weights = orig_weights.reshape( [ base_layer.out_channels, base_layer.in_channels, base_layer.kernel_size[0], base_layer.kernel_size[1], ] ) base_layer.weight.data = orig_weights def get_delta_weight(self, adapter_name: str) -> torch.Tensor: rank = self.r[adapter_name] coft = self.coft[adapter_name] eps = self.eps[adapter_name] opt_r = self.oft_r[adapter_name] if coft: with torch.no_grad(): opt_r.copy_(self._project_batch(opt_r, eps=eps)) orth_rotate = self._cayley_batch(opt_r) weight = self._block_diagonal(orth_rotate, rank) return weight # Copied from https://github.com/Zeju1997/oft/blob/84cebb965df69781e3d9c3c875f5980b421eaf24/oft-control/oft.py#L144 def _cayley_batch(self, data: torch.Tensor) -> torch.Tensor: b, r, c = data.shape # Ensure the input matrix is skew-symmetric skew = 0.5 * (data - data.transpose(1, 2)) I = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, c) # Perform the Cayley parametrization Q = torch.bmm(I - skew, torch.inverse(I + skew)) return Q # Copied from https://github.com/Zeju1997/oft/blob/84cebb965df69781e3d9c3c875f5980b421eaf24/oft-control/oft.py#L155 def _block_diagonal(self, oft_r: torch.Tensor, rank: int) -> torch.Tensor: if oft_r.shape[0] == 1: # block share blocks = [oft_r[0, ...] for i in range(rank)] else: blocks = [oft_r[i, ...] 
for i in range(rank)] # Use torch.block_diag to create the block diagonal matrix A = torch.block_diag(*blocks) return A # Copied from https://github.com/Zeju1997/oft/blob/84cebb965df69781e3d9c3c875f5980b421eaf24/oft-control/oft.py#L52 def _project_batch(self, oft_r, eps=1e-5): # scaling factor for each of the smaller block matrix eps = eps * 1 / torch.sqrt(torch.tensor(oft_r.shape[0])) I = ( torch.zeros((oft_r.size(1), oft_r.size(1)), device=oft_r.device, dtype=oft_r.dtype) .unsqueeze(0) .expand_as(oft_r) ) diff = oft_r - I norm_diff = torch.norm(oft_r - I, dim=(1, 2), keepdim=True) mask = (norm_diff <= eps).bool() out = torch.where(mask, oft_r, I + eps * (diff / norm_diff)) return out def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: if self.merged: self.unmerge() result = self.base_layer(x, *args, **kwargs) elif self.merged: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) if len(result.shape) == 4: result = result.permute(0, 2, 3, 1) base_layer = self.get_base_layer() base_bias = base_layer.bias if base_bias is not None: # Bias should be added after OFT forward result = result - base_bias.data # Execute all the adapters for active_adapter in self.active_adapters: if active_adapter not in self._available_adapters: continue module_dropout = self.module_dropout[active_adapter] # Modify current execution weights if (not self.training) or (self.training and torch.rand(1) > module_dropout): result = self._get_delta_activations(active_adapter, result, *args, **kwargs) if base_bias is not None: result = result + base_bias.data if len(result.shape) == 4: result = result.permute(0, 3, 1, 2) result = result.to(previous_dtype) return result class Linear(OFTLayer): """OFT implemented in Linear layer""" def __init__( self, base_layer: nn.Module, adapter_name: str = "default", r: int = 0, module_dropout: float = 0.0, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer(adapter_name, r, module_dropout, init_weights, **kwargs) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) base_layer = self.get_base_layer() base_weight = base_layer.weight.data delta_weight = delta_weight[: base_weight.shape[0], : base_weight.shape[0]] # don't add bias here, because the bias will be added after OFT forward return torch.matmul(input, delta_weight) def __repr__(self) -> str: rep = super().__repr__() return "oft." 
+ rep class Conv2d(OFTLayer): """OFT implemented in Conv2d layer""" def __init__( self, base_layer: nn.Module, adapter_name: str = "default", r: int = 0, module_dropout: float = 0.0, init_weights: bool = True, **kwargs, ): super().__init__(base_layer) # Create adapter and set it active self._active_adapter = adapter_name self.update_layer(adapter_name, r, module_dropout, init_weights, **kwargs) def _get_delta_activations( self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any ) -> torch.Tensor: delta_weight = self.get_delta_weight(adapter_name) base_layer = self.get_base_layer() base_weight = base_layer.weight.data delta_weight = delta_weight[: base_weight.shape[0], : base_weight.shape[0]] # don't add bias here, because the bias will be added after OFT forward return torch.matmul(input, delta_weight) def __repr__(self) -> str: rep = super().__repr__() return "oft." + rep
0
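The core of `OFTLayer.get_delta_weight` above is `_cayley_batch`: an unconstrained parameter block is first made skew-symmetric and then mapped through the Cayley transform `Q = (I - S)(I + S)^{-1}`, which always yields an orthogonal matrix, so the merged weight is a pure rotation of the base weight. The short standalone check below (not part of the repository) verifies that property numerically.

```python
import torch

def cayley_batch(data: torch.Tensor) -> torch.Tensor:
    # Same construction as OFTLayer._cayley_batch: skew-symmetrize, then Cayley transform.
    b, r, _ = data.shape
    skew = 0.5 * (data - data.transpose(1, 2))
    eye = torch.eye(r, device=data.device).unsqueeze(0).expand(b, r, r)
    return torch.bmm(eye - skew, torch.inverse(eye + skew))

blocks = torch.randn(4, 6, 6)
q = cayley_batch(blocks)
should_be_identity = torch.bmm(q, q.transpose(1, 2))
print(torch.allclose(should_be_identity, torch.eye(6).expand(4, 6, 6), atol=1e-4))  # True
```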
hf_public_repos
hf_public_repos/text-generation-inference/Cargo.toml
[workspace] members = [ "benchmark", "router", "router/client", "router/grpc-metadata", "launcher" ] [workspace.package] version = "1.2.0" edition = "2021" authors = ["Olivier Dehaene"] homepage = "https://github.com/huggingface/text-generation-inference" [profile.release] debug = 1 incremental = true lto = "off" panic = "abort"
0
hf_public_repos
hf_public_repos/text-generation-inference/update_doc.py
import subprocess import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument("--check", action="store_true") args = parser.parse_args() output = subprocess.check_output(["text-generation-launcher", "--help"]).decode( "utf-8" ) wrap_code_blocks_flag = "<!-- WRAP CODE BLOCKS -->" final_doc = f"# Text-generation-launcher arguments\n\n{wrap_code_blocks_flag}\n\n" lines = output.split("\n") header = "" block = [] for line in lines: if line.startswith(" -") or line.startswith(" -"): rendered_block = '\n'.join(block) if header: final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n" else: final_doc += f"```shell\n{rendered_block}\n```\n" block = [] tokens = line.split("<") if len(tokens)>1: header = tokens[-1][:-1] else: header = line.split("--")[-1] header = header.upper().replace("-", "_") block.append(line) rendered_block = '\n'.join(block) final_doc += f"## {header}\n```shell\n{rendered_block}\n```\n" block = [] filename = "docs/source/basic_tutorials/launcher.md" if args.check: with open(filename, "r") as f: doc = f.read() if doc != final_doc: tmp = "launcher.md" with open(tmp, "w") as g: g.write(final_doc) diff = subprocess.run( ["diff", tmp, filename], capture_output=True ).stdout.decode("utf-8") print(diff) raise Exception( "Doc is not up-to-date, run `python update_doc.py` in order to update it" ) else: with open(filename, "w") as f: f.write(final_doc) if __name__ == "__main__": main()
0
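`update_doc.py` above rebuilds `docs/source/basic_tutorials/launcher.md` from the launcher's `--help` output, turning every option into its own `## HEADER` section followed by a fenced shell block. The heart of it is the header-extraction rule applied to each option line; the sketch below isolates just that rule, and the sample help line is invented for illustration.

```python
line = "      --max-input-length <MAX_INPUT_LENGTH>"

tokens = line.split("<")
if len(tokens) > 1:
    header = tokens[-1][:-1]        # take the text inside <...>, dropping the trailing ">"
else:
    header = line.split("--")[-1]   # boolean flags have no <VALUE> part, fall back to the flag name
header = header.upper().replace("-", "_")
print(header)  # MAX_INPUT_LENGTH
```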
hf_public_repos
hf_public_repos/text-generation-inference/sagemaker-entrypoint.sh
#!/bin/bash if [[ -z "${HF_MODEL_ID}" ]]; then echo "HF_MODEL_ID must be set" exit 1 fi export MODEL_ID="${HF_MODEL_ID}" if [[ -n "${HF_MODEL_REVISION}" ]]; then export REVISION="${HF_MODEL_REVISION}" fi if [[ -n "${SM_NUM_GPUS}" ]]; then export NUM_SHARD="${SM_NUM_GPUS}" fi if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then export QUANTIZE="${HF_MODEL_QUANTIZE}" fi if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}" fi text-generation-launcher --port 8080
0
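The entrypoint above simply translates SageMaker-style environment variables into the names the launcher reads before starting it. The same mapping is written out in Python below purely for illustration; it is not part of the repository.

```python
import os

# SageMaker / Hugging Face DLC variable -> text-generation-launcher variable
mapping = {
    "HF_MODEL_ID": "MODEL_ID",  # required
    "HF_MODEL_REVISION": "REVISION",
    "SM_NUM_GPUS": "NUM_SHARD",
    "HF_MODEL_QUANTIZE": "QUANTIZE",
    "HF_MODEL_TRUST_REMOTE_CODE": "TRUST_REMOTE_CODE",
}

if "HF_MODEL_ID" not in os.environ:
    raise SystemExit("HF_MODEL_ID must be set")

for source, target in mapping.items():
    if source in os.environ:
        os.environ[target] = os.environ[source]
# The shell script then runs: text-generation-launcher --port 8080
```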
hf_public_repos
hf_public_repos/text-generation-inference/LICENSE
Hugging Face Optimized Inference License 1.0 (HFOILv1.0) This License Agreement governs the use of the Software and its Modifications. It is a binding agreement between the Licensor and You. This License Agreement shall be referred to as Hugging Face Optimized Inference License 1.0 or HFOILv1.0. We may publish revised versions of this License Agreement from time to time. Each version will be given a distinguished number. By downloading, accessing, modifying, distributing or otherwise using the Software, You consent to all of the terms and conditions below. So, if You do not agree with those, please do not download, access, modify, distribute, or use the Software. 1. PERMISSIONS You may use, modify and distribute the Software pursuant to the following terms and conditions: Copyright License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free, copyright license to reproduce, prepare, publicly display, publicly perform, sublicense under the terms herein, and distribute the Software and Modifications of the Software. Patent License. Subject to the terms and conditions of this License Agreement and where and as applicable, each Contributor hereby grants You a perpetual, worldwide, non-exclusive, royalty-free patent license to make, have made, Use, offer to sell, sell, import, and otherwise transfer the Software, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Software to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Software or a Contribution incorporated within the Software constitutes direct or contributory patent infringement, then any rights granted to You under this License Agreement for the Software shall terminate as of the date such litigation is filed. No other rights. All rights not expressly granted herein are retained. 2. RESTRICTIONS You may not distribute the Software as a hosted or managed, and paid service, where the service grants users access to any substantial set of the features or functionality of the Software. If you wish to do so, You will need to be granted additional rights from the Licensor which will be subject to a separate mutually agreed agreement. You may not sublicense the Software under any other terms than those listed in this License. 3. OBLIGATIONS When You modify the Software, You agree to: - attach a notice stating the Modifications of the Software You made; and - attach a notice stating that the Modifications of the Software are released under this License Agreement. When You distribute the Software or Modifications of the Software, You agree to: - give any recipients of the Software a copy of this License Agreement; - retain all Explanatory Documentation; and if sharing the Modifications of the Software, add Explanatory Documentation documenting the changes made to create the Modifications of the Software; - retain all copyright, patent, trademark and attribution notices. 4. MISCELLANEOUS Termination. Licensor reserves the right to restrict Use of the Software in violation of this License Agreement, upon which Your licenses will automatically terminate. Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Software by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. Trademarks and related. Nothing in this License Agreement permits You (i) to make Use of Licensors’ trademarks, trade names, or logos, (ii) otherwise suggest endorsement by Licensor, or (iii) misrepresent the relationship between the parties; and any rights not expressly granted herein are reserved by the Licensors. Output You generate. Licensor claims no rights in the Output. You agree not to contravene any provision as stated in the License Agreement with your Use of the Output. Disclaimer of Warranty. Except as expressly provided otherwise herein, and to the fullest extent permitted by law, Licensor provides the Software (and each Contributor provides its Contributions) AS IS, and Licensor disclaims all warranties or guarantees of any kind, express or implied, whether arising under any law or from any usage in trade, or otherwise including but not limited to the implied warranties of merchantability, non-infringement, quiet enjoyment, fitness for a particular purpose, or otherwise. You are solely responsible for determining the appropriateness of the Software and Modifications of the Software for your purposes (including your use or distribution of the Software and Modifications of the Software), and assume any risks associated with Your exercise of permissions under this License Agreement. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License Agreement or out of the Use or inability to Use the Software (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, model failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. Accepting Warranty or Additional Liability. While sharing the Software or Modifications of the Software thereof, You may choose to offer and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License Agreement. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of Licensor or any other Contributor, and you hereby agree to indemnify, defend, and hold Licensor and each other Contributor (and their successors or assigns) harmless for any liability incurred by, or claims asserted against, such Licensor or Contributor (and their successors or assigns) by reason of your accepting any such warranty or additional liability. Severability. This License Agreement is a license of copyright and patent rights and an agreement in contract between You and the Licensor. 
If any provision of this License Agreement is held to be invalid, illegal or unenforceable, the remaining provisions shall be unaffected thereby and remain valid as if such provision had not been set forth herein. 5. DEFINITIONS “Contribution” refers to any work of authorship, including the original version of the Software and any Modifications of the Software that is intentionally submitted to Licensor for inclusion in the Software by the copyright owner or by an individual or entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Software, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as “Not a Contribution.” “Contributor” refers to Licensor and any individual or entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Software. “Data” refers to a collection of information extracted from the dataset used with the Model, including to train, pretrain, or otherwise evaluate the Model. The Data is not licensed under this License Agreement. “Explanatory Documentation” refers to any documentation or related information including but not limited to model cards or data cards dedicated to inform the public about the characteristics of the Software. Explanatory documentation is not licensed under this License. "License Agreement" refers to these terms and conditions. “Licensor” refers to the rights owners or entity authorized by the rights owners that are granting the terms and conditions of this License Agreement. “Model” refers to machine-learning based assemblies (including checkpoints), consisting of learnt weights and parameters (including optimizer states), corresponding to a model architecture as embodied in Software source code. Source code is not licensed under this License Agreement. “Modifications of the Software” refers to all changes to the Software, including without limitation derivative works of the Software. “Output” refers to the results of operating the Software. “Share” refers to any transmission, reproduction, publication or other sharing of the Software or Modifications of the Software to a third party, including providing the Software as a hosted service made available by electronic or other remote means, including - but not limited to - API-based or web access. “Software” refers to the software and Model (or parts of either) that Licensor makes available under this License Agreement. “Third Parties” refers to individuals or legal entities that are not under common control with Licensor or You. “Use” refers to anything You or your representatives do with the Software, including but not limited to generating any Output, fine tuning, updating, running, training, evaluating and/or reparametrizing the Model. "You" (or "Your") refers to an individual or Legal Entity exercising permissions granted by this License Agreement and/or making Use of the Software for whichever purpose and in any field of Use.
0
hf_public_repos
hf_public_repos/text-generation-inference/Dockerfile_amd
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.71 AS chef
WORKDIR /usr/src

ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse

FROM chef as planner
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder

ARG GIT_SHA
ARG DOCKER_LABEL

RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
    unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
    unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
    rm -f $PROTOC_ZIP

COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json

COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo build --release

# Text Generation Inference base image for RoCm
FROM rocm/dev-ubuntu-20.04:5.7 as base

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    ccache \
    curl \
    git \
    make \
    libssl-dev \
    g++ \
    # Needed to build VLLM & flash.
    rocthrust-dev \
    hipsparse-dev \
    hipblas-dev && \
    rm -rf /var/lib/apt/lists/*

# Keep in sync with `server/pyproject.toml`
ARG MAMBA_VERSION=23.1.0-1
ARG PYTORCH_VERSION='2.2.0.dev0'
ARG ROCM_VERSION='5.7'
ARG PYTHON_VERSION='3.10.10'
# Automatically set by buildx
ARG TARGETPLATFORM
ENV PATH /opt/conda/bin:$PATH

# TGI seems to require libssl.so.1.1 instead of libssl.so.3 so we can't use ubuntu 22.04. Ubuntu 20.04 has python==3.8, and TGI requires python>=3.9, hence the need for miniconda.
# Install mamba
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") MAMBA_ARCH=aarch64 ;; \
    *) MAMBA_ARCH=x86_64 ;; \
    esac && \
    curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
    bash ~/mambaforge.sh -b -p /opt/conda && \
    mamba init && \
    rm ~/mambaforge.sh

# Install PyTorch nightly (2.2.0.dev20231106) compiled against RoCm 5.7, as VLLM can not be compiled with RoCm 5.6.
RUN pip install --pre torch==2.2.0.dev20231106 --index-url https://download.pytorch.org/whl/nightly/rocm5.7

FROM base AS kernel-builder

# Build vllm kernels
FROM kernel-builder AS vllm-builder
WORKDIR /usr/src

COPY server/Makefile-vllm Makefile

# Build specific version of vllm
RUN make build-vllm-rocm

# Build Flash Attention v2 kernels
FROM kernel-builder AS flash-att-v2-builder
WORKDIR /usr/src

COPY server/Makefile-flash-att-v2 Makefile

# Build specific version of flash attention v2
RUN make build-flash-attention-v2-rocm

# Build Transformers CUDA kernels (gpt-neox and bloom)
FROM kernel-builder as custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
RUN PYTORCH_ROCM_ARCH=gfx90a python setup.py build

FROM base as base-copy

# Text Generation Inference base env
ENV HUGGINGFACE_HUB_CACHE=/data \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    PORT=80

# Copy build artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages

# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages

# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages

# Install flash-attention dependencies
RUN pip install einops --no-cache-dir

# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
    make gen-server && \
    pip install -r requirements_rocm.txt && \
    pip install ".[accelerate, peft]" --no-cache-dir

# Install benchmarker
COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher

# AWS Sagemaker compatible image
FROM base-copy as sagemaker

COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]

# Final image
FROM base-copy

ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
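The Dockerfile above only defines the ROCm image. As a rough, hypothetical usage sketch (the image tag, mounts and port mapping are illustrative assumptions, not values from this repository; the model id reuses the example from the project Makefile), it might be built and launched on a ROCm host along these lines:

# Build the ROCm image from the repository root (tag is an assumption)
docker build -f Dockerfile_amd -t tgi-rocm:dev .
# ROCm containers typically need the kfd/dri devices passed through;
# arguments after the image name are forwarded to text-generation-launcher
docker run --rm -it --device=/dev/kfd --device=/dev/dri --shm-size 1g \
  -p 8080:80 -v $PWD/data:/data \
  tgi-rocm:dev --model-id tiiuae/falcon-7b-instruct

The container listens on port 80 (ENV PORT=80) and caches weights under /data (HUGGINGFACE_HUB_CACHE), which is why the example maps 8080:80 and mounts a host directory at /data.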
0
hf_public_repos
hf_public_repos/text-generation-inference/Dockerfile
# Rust builder
FROM lukemathwalker/cargo-chef:latest-rust-1.71 AS chef
WORKDIR /usr/src

ARG CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse

FROM chef as planner
COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo chef prepare --recipe-path recipe.json

FROM chef AS builder

ARG GIT_SHA
ARG DOCKER_LABEL

RUN PROTOC_ZIP=protoc-21.12-linux-x86_64.zip && \
    curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP && \
    unzip -o $PROTOC_ZIP -d /usr/local bin/protoc && \
    unzip -o $PROTOC_ZIP -d /usr/local 'include/*' && \
    rm -f $PROTOC_ZIP

COPY --from=planner /usr/src/recipe.json recipe.json
RUN cargo chef cook --release --recipe-path recipe.json

COPY Cargo.toml Cargo.toml
COPY rust-toolchain.toml rust-toolchain.toml
COPY proto proto
COPY benchmark benchmark
COPY router router
COPY launcher launcher
RUN cargo build --release

# Python builder
# Adapted from: https://github.com/pytorch/pytorch/blob/master/Dockerfile
FROM nvidia/cuda:12.1.0-devel-ubuntu20.04 as pytorch-install

ARG PYTORCH_VERSION=2.1.1
ARG PYTHON_VERSION=3.10
# Keep in sync with `server/pyproject.toml`
ARG CUDA_VERSION=12.1
ARG MAMBA_VERSION=23.3.1-1
ARG CUDA_CHANNEL=nvidia
ARG INSTALL_CHANNEL=pytorch
# Automatically set by buildx
ARG TARGETPLATFORM

ENV PATH /opt/conda/bin:$PATH

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    ca-certificates \
    ccache \
    curl \
    git && \
    rm -rf /var/lib/apt/lists/*

# Install conda
# translating Docker's TARGETPLATFORM into mamba arches
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") MAMBA_ARCH=aarch64 ;; \
    *) MAMBA_ARCH=x86_64 ;; \
    esac && \
    curl -fsSL -v -o ~/mambaforge.sh -O "https://github.com/conda-forge/miniforge/releases/download/${MAMBA_VERSION}/Mambaforge-${MAMBA_VERSION}-Linux-${MAMBA_ARCH}.sh"
RUN chmod +x ~/mambaforge.sh && \
    bash ~/mambaforge.sh -b -p /opt/conda && \
    rm ~/mambaforge.sh

# Install pytorch
# On arm64 we exit with an error code
RUN case ${TARGETPLATFORM} in \
    "linux/arm64") exit 1 ;; \
    *) /opt/conda/bin/conda update -y conda && \
    /opt/conda/bin/conda install -c "${INSTALL_CHANNEL}" -c "${CUDA_CHANNEL}" -y "python=${PYTHON_VERSION}" "pytorch=$PYTORCH_VERSION" "pytorch-cuda=$(echo $CUDA_VERSION | cut -d'.' -f 1-2)" ;; \
    esac && \
    /opt/conda/bin/conda clean -ya

# CUDA kernels builder image
FROM pytorch-install as kernel-builder

ARG MAX_JOBS=8

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    ninja-build \
    && rm -rf /var/lib/apt/lists/*

# Build Flash Attention CUDA kernels
FROM kernel-builder as flash-att-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att Makefile
# Build specific version of flash attention
RUN make build-flash-attention

# Build Flash Attention v2 CUDA kernels
FROM kernel-builder as flash-att-v2-builder
WORKDIR /usr/src
COPY server/Makefile-flash-att-v2 Makefile
# Build specific version of flash attention v2
RUN make build-flash-attention-v2-cuda

# Build Transformers exllama kernels
FROM kernel-builder as exllama-kernels-builder
WORKDIR /usr/src
COPY server/exllama_kernels/ .
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build

# Build Transformers exllamav2 kernels
FROM kernel-builder as exllamav2-kernels-builder
WORKDIR /usr/src
COPY server/exllamav2_kernels/ .
# Build specific version of transformers
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" python setup.py build

# Build Transformers awq kernels
FROM kernel-builder as awq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-awq Makefile
# Build specific version of transformers
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-awq

# Build eetq kernels
FROM kernel-builder as eetq-kernels-builder
WORKDIR /usr/src
COPY server/Makefile-eetq Makefile
# Build specific version of transformers
RUN TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" make build-eetq

# Build Transformers CUDA kernels
FROM kernel-builder as custom-kernels-builder
WORKDIR /usr/src
COPY server/custom_kernels/ .
# Build specific version of transformers
RUN python setup.py build

# Build vllm CUDA kernels
FROM kernel-builder as vllm-builder
WORKDIR /usr/src
COPY server/Makefile-vllm Makefile
# Build specific version of vllm
RUN make build-vllm-cuda

# Text Generation Inference base image
FROM nvidia/cuda:12.1.0-base-ubuntu20.04 as base

# Conda env
ENV PATH=/opt/conda/bin:$PATH \
    CONDA_PREFIX=/opt/conda

# Text Generation Inference base env
ENV HUGGINGFACE_HUB_CACHE=/data \
    HF_HUB_ENABLE_HF_TRANSFER=1 \
    PORT=80

WORKDIR /usr/src

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    libssl-dev \
    ca-certificates \
    make \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy conda with PyTorch installed
COPY --from=pytorch-install /opt/conda /opt/conda

# Copy build artifacts from flash attention builder
COPY --from=flash-att-builder /usr/src/flash-attention/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/layer_norm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
COPY --from=flash-att-builder /usr/src/flash-attention/csrc/rotary/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages

# Copy build artifacts from flash attention v2 builder
COPY --from=flash-att-v2-builder /usr/src/flash-attention-v2/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages

# Copy build artifacts from custom kernels builder
COPY --from=custom-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllama kernels builder
COPY --from=exllama-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from exllamav2 kernels builder
COPY --from=exllamav2-kernels-builder /usr/src/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from awq kernels builder
COPY --from=awq-kernels-builder /usr/src/llm-awq/awq/kernels/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from eetq kernels builder
COPY --from=eetq-kernels-builder /usr/src/eetq/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages
# Copy build artifacts from vllm builder
COPY --from=vllm-builder /usr/src/vllm/build/lib.linux-x86_64-cpython-310 /opt/conda/lib/python3.10/site-packages

# Install flash-attention dependencies
RUN pip install einops --no-cache-dir

# Install server
COPY proto proto
COPY server server
COPY server/Makefile server/Makefile
RUN cd server && \
    make gen-server && \
    pip install -r requirements_cuda.txt && \
    pip install ".[bnb, accelerate, quantize, peft]" --no-cache-dir

# Install benchmarker
COPY --from=builder /usr/src/target/release/text-generation-benchmark /usr/local/bin/text-generation-benchmark
# Install router
COPY --from=builder /usr/src/target/release/text-generation-router /usr/local/bin/text-generation-router
# Install launcher
COPY --from=builder /usr/src/target/release/text-generation-launcher /usr/local/bin/text-generation-launcher

RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
    build-essential \
    g++ \
    && rm -rf /var/lib/apt/lists/*

# AWS Sagemaker compatible image
FROM base as sagemaker

COPY sagemaker-entrypoint.sh entrypoint.sh
RUN chmod +x entrypoint.sh

ENTRYPOINT ["./entrypoint.sh"]

# Final image
FROM base

ENTRYPOINT ["text-generation-launcher"]
CMD ["--json-output"]
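A short hypothetical sketch of how the named stages in this CUDA Dockerfile might be selected and run (the tags are assumptions; `--target` picks one of the stages defined above, and running requires the NVIDIA container toolkit):

# Default (final) image, built from the repository root
docker build -t tgi-cuda:dev .
# Or build the AWS Sagemaker-compatible stage explicitly
docker build --target sagemaker -t tgi-cuda:sagemaker .
# Model id, port mapping and volume are examples only
docker run --rm -it --gpus all --shm-size 1g -p 8080:80 -v $PWD/data:/data \
  tgi-cuda:dev --model-id tiiuae/falcon-7b-instruct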
0
hf_public_repos
hf_public_repos/text-generation-inference/Makefile
install-server:
	cd server && make install

install-custom-kernels:
	if [ "$$BUILD_EXTENSIONS" = "True" ]; then cd server/custom_kernels && python setup.py install; else echo "Custom kernels are disabled, you need to set the BUILD_EXTENSIONS environment variable to 'True' in order to build them. (Please read the docs, kernels might not work on all hardware)"; fi

install-integration-tests:
	cd integration-tests && pip install -r requirements.txt
	cd clients/python && pip install .

install-router:
	cd router && cargo install --path .

install-launcher:
	cd launcher && cargo install --path .

install-benchmark:
	cd benchmark && cargo install --path .

install: install-server install-router install-launcher install-custom-kernels

server-dev:
	cd server && make run-dev

router-dev:
	cd router && cargo run -- --port 8080

rust-tests: install-router install-launcher
	cargo test

integration-tests: install-integration-tests
	pytest -s -vv -m "not private" integration-tests

update-integration-tests: install-integration-tests
	pytest -s -vv --snapshot-update integration-tests

python-server-tests:
	HF_HUB_ENABLE_HF_TRANSFER=1 pytest -s -vv -m "not private" server/tests

python-client-tests:
	pytest clients/python/tests

python-tests: python-server-tests python-client-tests

run-falcon-7b-instruct:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --port 8080

run-falcon-7b-instruct-quantize:
	text-generation-launcher --model-id tiiuae/falcon-7b-instruct --quantize bitsandbytes --port 8080

clean:
	rm -rf target aml
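A brief usage sketch of the targets defined above, run from the repository root; only targets and variables that appear in this Makefile are used, and the ordering is illustrative rather than prescriptive:

make install                                   # install router, launcher and server (custom kernels skipped unless enabled)
BUILD_EXTENSIONS=True make install-custom-kernels
make rust-tests                                # cargo test after installing router and launcher
make run-falcon-7b-instruct                    # start text-generation-launcher on port 8080 with an example model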
0
hf_public_repos
hf_public_repos/text-generation-inference/Cargo.lock
# This file is automatically @generated by Cargo. # It is not intended for manual editing. version = 3 [[package]] name = "addr2line" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] [[package]] name = "adler" version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" dependencies = [ "cfg-if", "once_cell", "version_check", "zerocopy", ] [[package]] name = "aho-corasick" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" dependencies = [ "memchr", ] [[package]] name = "anstream" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", "utf8parse", ] [[package]] name = "anstyle" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" [[package]] name = "anstyle-parse" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "anstyle-wincon" version = "3.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" dependencies = [ "anstyle", "windows-sys 0.48.0", ] [[package]] name = "anyhow" version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" [[package]] name = "arc-swap" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" [[package]] name = "async-rustls" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93b21a03b7c21702a0110f9f8d228763a533570deb376119042dabf33c37a01a" dependencies = [ "futures-io", "rustls 0.20.9", "webpki", ] [[package]] name = "async-stream" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ "async-stream-impl", "futures-core", "pin-project-lite", ] [[package]] name = "async-stream-impl" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "async-trait" version = "0.1.74" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "autocfg" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "average" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d804c74bb2d66e9b7047658d21af0f1c937d7d2466410cbf1aed3b0c04048d4" dependencies = [ "easy-cast", "float-ord", "num-traits", ] [[package]] name = "awaitdrop" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771051cdc7eec2dc1b23fbf870bb7fbb89136fe374227c875e377f1eed99a429" dependencies = [ "futures", "generational-arena", "parking_lot", "slotmap", ] [[package]] name = "axum" version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" dependencies = [ "async-trait", "axum-core", "bitflags 1.3.2", "bytes", "futures-util", "http", "http-body", "hyper", "itoa", "matchit", "memchr", "mime", "percent-encoding", "pin-project-lite", "rustversion", "serde", "serde_json", "serde_path_to_error", "serde_urlencoded", "sync_wrapper", "tokio", "tower", "tower-layer", "tower-service", ] [[package]] name = "axum-core" version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" dependencies = [ "async-trait", "bytes", "futures-util", "http", "http-body", "mime", "rustversion", "tower-layer", "tower-service", ] [[package]] name = "axum-tracing-opentelemetry" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06985105829f176e9a3f113b1c71cc24e08f600ef0df4e70cd90d144f889e19f" dependencies = [ "axum", "futures-core", "futures-util", "http", "opentelemetry", "pin-project-lite", "tower", "tracing", "tracing-opentelemetry", "tracing-opentelemetry-instrumentation-sdk", ] [[package]] name = "backtrace" version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", ] [[package]] name = "base64" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" version = "0.21.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" [[package]] name = "bitflags" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "block-buffer" version = "0.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" dependencies = [ "generic-array", ] [[package]] name = "bumpalo" version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" [[package]] name = "bytecount" version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1e5f035d16fc623ae5f74981db80a439803888314e3a555fd6f04acd51a3205" [[package]] name = "byteorder" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" [[package]] name = "cassowary" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53" [[package]] name = "cc" version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" dependencies = [ "libc", ] [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" version = "4.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fffed7514f420abec6d183b1d3acfd9099c79c3a10a06ade4f8203f1411272" dependencies = [ "clap_builder", "clap_derive", ] [[package]] name = "clap_builder" version = "4.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "63361bae7eef3771745f02d8d892bec2fee5f6e34af316ba556e7f97a7069ff1" dependencies = [ "anstream", "anstyle", "clap_lex", "strsim", ] [[package]] name = "clap_derive" version = "4.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" dependencies = [ "heck", "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "clap_lex" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" [[package]] name = "colorchoice" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" [[package]] name = "console" version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" dependencies = [ "encode_unicode", "lazy_static", "libc", "unicode-width", "windows-sys 0.45.0", ] [[package]] name = "core-foundation" version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "core-foundation-sys" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" [[package]] name = "cpufeatures" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce420fe07aecd3e67c5f910618fe65e94158f6dcc0adf44e00d69ce2bdfe0fd0" dependencies = [ "libc", ] [[package]] name = "crc32fast" version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", 
] [[package]] name = "crossbeam-channel" version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" dependencies = [ "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" dependencies = [ "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" version = "0.9.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-utils" version = "0.8.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" dependencies = [ "cfg-if", ] [[package]] name = "crossterm" version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f476fe445d41c9e991fd07515a6f463074b782242ccf4a5b7b1d1012e70824df" dependencies = [ "bitflags 2.4.1", "crossterm_winapi", "libc", "mio", "parking_lot", "signal-hook", "signal-hook-mio", "winapi", ] [[package]] name = "crossterm_winapi" version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b" dependencies = [ "winapi", ] [[package]] name = "crypto-common" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] [[package]] name = "ctrlc" version = "3.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" dependencies = [ "nix", "windows-sys 0.48.0", ] [[package]] name = "darling" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" dependencies = [ "darling_core", "darling_macro", ] [[package]] name = "darling_core" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", "syn 1.0.109", ] [[package]] name = "darling_macro" version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core", "quote", "syn 1.0.109", ] [[package]] name = "deranged" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" dependencies = [ "powerfmt", ] [[package]] name = "derive_builder" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" dependencies = [ "derive_builder_macro", ] [[package]] name = "derive_builder_core" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" dependencies = [ "darling", "proc-macro2", 
"quote", "syn 1.0.109", ] [[package]] name = "derive_builder_macro" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" dependencies = [ "derive_builder_core", "syn 1.0.109", ] [[package]] name = "digest" version = "0.10.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" dependencies = [ "block-buffer", "crypto-common", ] [[package]] name = "dirs" version = "4.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca3aa72a6f96ea37bbc5aa912f6788242832f75369bdfdadcb0e38423f100059" dependencies = [ "dirs-sys 0.3.7", ] [[package]] name = "dirs" version = "5.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44c45a9d03d6676652bcb5e724c7e988de1acad23a711b5217ab9cbecbec2225" dependencies = [ "dirs-sys 0.4.1", ] [[package]] name = "dirs-sys" version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", "winapi", ] [[package]] name = "dirs-sys" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520f05a5cbd335fae5a99ff7a6ab8627577660ee5cfd6a94a6a929b52ff0321c" dependencies = [ "libc", "option-ext", "redox_users", "windows-sys 0.48.0", ] [[package]] name = "easy-cast" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "10936778145f3bea71fd9bf61332cce28c28e96a380714f7ab34838b80733fd6" dependencies = [ "libm", ] [[package]] name = "either" version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" [[package]] name = "encode_unicode" version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ "cfg-if", ] [[package]] name = "equivalent" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" dependencies = [ "libc", "windows-sys 0.52.0", ] [[package]] name = "esaxx-rs" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d817e038c30374a4bcb22f94d0a8a0e216958d4c3dcde369b1439fec4bdda6e6" dependencies = [ "cc", ] [[package]] name = "fastrand" version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" [[package]] name = "fixedbitset" version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" dependencies = 
[ "crc32fast", "miniz_oxide", ] [[package]] name = "float-ord" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce81f49ae8a0482e4c55ea62ebbd7e5a686af544c00b9d090bba3ff9be97b3d" [[package]] name = "float_eq" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28a80e3145d8ad11ba0995949bbcf48b9df2be62772b3d351ef017dff6ecb853" [[package]] name = "fnv" version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "foreign-types" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" dependencies = [ "foreign-types-shared", ] [[package]] name = "foreign-types-shared" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] [[package]] name = "futures" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" dependencies = [ "futures-channel", "futures-core", "futures-executor", "futures-io", "futures-sink", "futures-task", "futures-util", ] [[package]] name = "futures-channel" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" dependencies = [ "futures-core", "futures-sink", ] [[package]] name = "futures-core" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" [[package]] name = "futures-executor" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" dependencies = [ "futures-core", "futures-task", "futures-util", ] [[package]] name = "futures-io" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" [[package]] name = "futures-macro" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "futures-sink" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" [[package]] name = "futures-task" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" [[package]] name = "futures-util" version = "0.3.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" dependencies = [ "futures-channel", "futures-core", "futures-io", "futures-macro", "futures-sink", "futures-task", "memchr", "pin-project-lite", "pin-utils", "slab", ] [[package]] name = 
"generational-arena" version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7" dependencies = [ "cfg-if", ] [[package]] name = "generic-array" version = "0.14.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", ] [[package]] name = "getrandom" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe9006bed769170c11f845cf00c7c1e9092aeb3f268e007c3e760ac68008070f" dependencies = [ "cfg-if", "libc", "wasi", ] [[package]] name = "gimli" version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" [[package]] name = "grpc-metadata" version = "0.1.0" dependencies = [ "opentelemetry", "tonic 0.10.2", "tracing", "tracing-opentelemetry", ] [[package]] name = "h2" version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d6250322ef6e60f93f9a2162799302cd6f68f79f6e5d85c8c16f14d1d958178" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", "http", "indexmap 2.1.0", "slab", "tokio", "tokio-util", "tracing", ] [[package]] name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ff8ae62cd3a9102e5637afc8452c55acf3844001bd5374e0b0bd7b6616c038" dependencies = [ "ahash", ] [[package]] name = "hashbrown" version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d77f7ec81a6d05a3abb01ab6eb7590f6083d08449fe5a1c8b1e620283546ccb7" [[package]] name = "hf-hub" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b780635574b3d92f036890d8373433d6f9fc7abb320ee42a5c25897fc8ed732" dependencies = [ "dirs 5.0.1", "indicatif", "log", "native-tls", "rand", "serde", "serde_json", "thiserror", "ureq", ] [[package]] name = "home" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "hostname" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" dependencies = [ "libc", "match_cfg", "winapi", ] [[package]] name = "http" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" dependencies = [ "bytes", "fnv", "itoa", ] [[package]] name = "http-body" version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" 
dependencies = [ "bytes", "http", "pin-project-lite", ] [[package]] name = "http-range-header" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" [[package]] name = "httparse" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" version = "0.14.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2", "http", "http-body", "httparse", "httpdate", "itoa", "pin-project-lite", "socket2 0.4.10", "tokio", "tower-service", "tracing", "want", ] [[package]] name = "hyper-timeout" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" dependencies = [ "hyper", "pin-project-lite", "tokio", "tokio-io-timeout", ] [[package]] name = "hyper-tls" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", "hyper", "native-tls", "tokio", "tokio-native-tls", ] [[package]] name = "ident_case" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", ] [[package]] name = "indexmap" version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" dependencies = [ "autocfg", "hashbrown 0.12.3", ] [[package]] name = "indexmap" version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" dependencies = [ "equivalent", "hashbrown 0.14.3", "serde", ] [[package]] name = "indicatif" version = "0.17.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb28741c9db9a713d93deb3bb9515c20788cef5815265bee4980e87bde7e0f25" dependencies = [ "console", "instant", "number_prefix", "portable-atomic", "unicode-width", ] [[package]] name = "indoc" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e186cfbae8084e513daff4240b4797e342f988cecda4fb6c939150f96315fd8" [[package]] name = "init-tracing-opentelemetry" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94bd26b1b737bc11f183620072e188d1c6ede67e0e78682228d66b49ec510e17" dependencies = [ "opentelemetry", "opentelemetry-otlp", "thiserror", "tracing", "tracing-opentelemetry", ] [[package]] name = "instant" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if", ] [[package]] 
name = "ipnet" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "itertools" version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itertools" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" dependencies = [ "either", ] [[package]] name = "itoa" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" [[package]] name = "js-sys" version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cee9c64da59eae3b50095c18d3e74f8b73c0b86d2792824ff01bbce68ba229ca" dependencies = [ "wasm-bindgen", ] [[package]] name = "lazy_static" version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libm" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" [[package]] name = "libredox" version = "0.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" dependencies = [ "bitflags 2.4.1", "libc", "redox_syscall", ] [[package]] name = "linux-raw-sys" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" [[package]] name = "lock_api" version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" dependencies = [ "autocfg", "scopeguard", ] [[package]] name = "log" version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" [[package]] name = "mach2" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8" dependencies = [ "libc", ] [[package]] name = "macro_rules_attribute" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a82271f7bc033d84bbca59a3ce3e4159938cb08a9c3aebbe54d215131518a13" dependencies = [ "macro_rules_attribute-proc_macro", "paste", ] [[package]] name = "macro_rules_attribute-proc_macro" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8dd856d451cc0da70e2ef2ce95a18e39a93b7558bedf10201ad28503f918568" [[package]] name = "match_cfg" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata 0.1.10", ] [[package]] name = "matchit" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" [[package]] name = "memoffset" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" dependencies = [ "autocfg", ] [[package]] name = "metrics" version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fde3af1a009ed76a778cb84fdef9e7dbbdf5775ae3e4cc1f434a6a307f6f76c5" dependencies = [ "ahash", "metrics-macros", "portable-atomic", ] [[package]] name = "metrics-exporter-prometheus" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a4964177ddfdab1e3a2b37aec7cf320e14169abb0ed73999f558136409178d5" dependencies = [ "base64 0.21.5", "hyper", "indexmap 1.9.3", "ipnet", "metrics", "metrics-util", "quanta", "thiserror", "tokio", "tracing", ] [[package]] name = "metrics-macros" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ddece26afd34c31585c74a4db0630c376df271c285d682d1e55012197830b6df" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "metrics-util" version = "0.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4de2ed6e491ed114b40b732e4d1659a9d53992ebd87490c44a6ffe23739d973e" dependencies = [ "crossbeam-epoch", "crossbeam-utils", "hashbrown 0.13.1", "metrics", "num_cpus", "quanta", "sketches-ddsketch", ] [[package]] name = "mime" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", ] [[package]] name = "minimal-lexical" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" dependencies = [ "adler", ] [[package]] name = "mio" version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" dependencies = [ "libc", "log", "wasi", "windows-sys 0.48.0", ] [[package]] name = "monostate" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e404e13820ea0df0eda93aa294e0c80de76a0daa6bec590d376fbec6d7810394" dependencies = [ "monostate-impl", "serde", ] [[package]] name = "monostate-impl" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "531c82a934da419bed3da09bd87d6e98c72f8d4aa755427b3b009c2b8b8c433c" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "multimap" version = "0.8.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" [[package]] name = "muxado" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e92b89ac3127251efde6f5a9586e5aae99468d06fcf9f133b377f58d5ed66446" dependencies = [ "async-trait", "awaitdrop", "bitflags 1.3.2", "bytes", "futures", "pin-project", "rand", "thiserror", "tokio", "tokio-util", "tracing", ] [[package]] name = "native-tls" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", "log", "openssl", "openssl-probe", "openssl-sys", "schannel", "security-framework", "security-framework-sys", "tempfile", ] [[package]] name = "ngrok" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1454b1edbc5f2c8ff3242c237cb84388b50eced8eb26b4204e49698ed6511784" dependencies = [ "arc-swap", "async-rustls", "async-trait", "awaitdrop", "axum", "base64 0.13.1", "bytes", "futures", "hostname", "hyper", "muxado", "once_cell", "parking_lot", "regex", "rustls-pemfile", "serde", "serde_json", "thiserror", "tokio", "tokio-retry", "tokio-util", "tracing", "windows-sys 0.45.0", ] [[package]] name = "nix" version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ "bitflags 2.4.1", "cfg-if", "libc", ] [[package]] name = "nohash-hasher" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" version = "7.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" dependencies = [ "memchr", "minimal-lexical", ] [[package]] name = "ntapi" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4" dependencies = [ "winapi", ] [[package]] name = "nu-ansi-term" version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ "overload", "winapi", ] [[package]] name = "num-traits" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" dependencies = [ "autocfg", "libm", ] [[package]] name = "num_cpus" version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ "hermit-abi", "libc", ] [[package]] name = "num_threads" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ "libc", ] [[package]] name = "number_prefix" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ 
"memchr", ] [[package]] name = "once_cell" version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" [[package]] name = "onig" version = "6.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c4b31c8722ad9171c6d77d3557db078cab2bd50afcc9d09c8b315c59df8ca4f" dependencies = [ "bitflags 1.3.2", "libc", "once_cell", "onig_sys", ] [[package]] name = "onig_sys" version = "69.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b829e3d7e9cc74c7e315ee8edb185bf4190da5acde74afd7fc59c35b1f086e7" dependencies = [ "cc", "pkg-config", ] [[package]] name = "openssl" version = "0.10.60" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "79a4c6c3a2b158f7f8f2a2fc5a969fa3a068df6fc9dbb4a43845436e3af7c800" dependencies = [ "bitflags 2.4.1", "cfg-if", "foreign-types", "libc", "once_cell", "openssl-macros", "openssl-sys", ] [[package]] name = "openssl-macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "openssl-probe" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" version = "0.9.96" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3812c071ba60da8b5677cc12bcb1d42989a65553772897a7e0355545a819838f" dependencies = [ "cc", "libc", "pkg-config", "vcpkg", ] [[package]] name = "opentelemetry" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9591d937bc0e6d2feb6f71a559540ab300ea49955229c347a517a28d27784c54" dependencies = [ "opentelemetry_api", "opentelemetry_sdk", ] [[package]] name = "opentelemetry-http" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7594ec0e11d8e33faf03530a4c49af7064ebba81c1480e01be67d90b356508b" dependencies = [ "async-trait", "bytes", "http", "opentelemetry_api", ] [[package]] name = "opentelemetry-otlp" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e5e5a5c4135864099f3faafbe939eb4d7f9b80ebf68a8448da961b32a7c1275" dependencies = [ "async-trait", "futures-core", "http", "opentelemetry-proto", "opentelemetry-semantic-conventions", "opentelemetry_api", "opentelemetry_sdk", "prost 0.11.9", "thiserror", "tokio", "tonic 0.9.2", ] [[package]] name = "opentelemetry-proto" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1e3f814aa9f8c905d0ee4bde026afd3b2577a97c10e1699912e3e44f0c4cbeb" dependencies = [ "opentelemetry_api", "opentelemetry_sdk", "prost 0.11.9", "tonic 0.9.2", ] [[package]] name = "opentelemetry-semantic-conventions" version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73c9f9340ad135068800e7f1b24e9e09ed9e7143f5bf8518ded3d3ec69789269" dependencies = [ "opentelemetry", ] [[package]] name = "opentelemetry_api" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a81f725323db1b1206ca3da8bb19874bbd3f57c3bcd59471bfb04525b265b9b" dependencies = [ "futures-channel", "futures-util", "indexmap 1.9.3", "js-sys", "once_cell", "pin-project-lite", "thiserror", "urlencoding", ] [[package]] 
name = "opentelemetry_sdk" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa8e705a0612d48139799fcbaba0d4a90f06277153e43dd2bdc16c6f0edd8026" dependencies = [ "async-trait", "crossbeam-channel", "futures-channel", "futures-executor", "futures-util", "once_cell", "opentelemetry_api", "ordered-float", "percent-encoding", "rand", "regex", "serde_json", "thiserror", "tokio", "tokio-stream", ] [[package]] name = "option-ext" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "ordered-float" version = "3.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1e1c390732d15f1d48471625cd92d154e66db2c56645e29a9cd26f4699f72dc" dependencies = [ "num-traits", ] [[package]] name = "overload" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "papergrid" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2ccbe15f2b6db62f9a9871642746427e297b0ceb85f9a7f1ee5ff47d184d0c8" dependencies = [ "bytecount", "fnv", "unicode-width", ] [[package]] name = "parking_lot" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", "windows-targets 0.48.5", ] [[package]] name = "paste" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" [[package]] name = "percent-encoding" version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "petgraph" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", "indexmap 2.1.0", ] [[package]] name = "pin-project" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "pin-project-lite" version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" [[package]] name = "pin-utils" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" 
[[package]] name = "portable-atomic" version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3bccab0e7fd7cc19f820a1c8c91720af652d0c88dc9664dd72aef2614f04af3b" [[package]] name = "powerfmt" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "prettyplease" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" dependencies = [ "proc-macro2", "syn 2.0.39", ] [[package]] name = "proc-macro-error" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2", "quote", "syn 1.0.109", "version_check", ] [[package]] name = "proc-macro-error-attr" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2", "quote", "version_check", ] [[package]] name = "proc-macro2" version = "1.0.70" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" dependencies = [ "unicode-ident", ] [[package]] name = "prost" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" dependencies = [ "bytes", "prost-derive 0.11.9", ] [[package]] name = "prost" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" dependencies = [ "bytes", "prost-derive 0.12.3", ] [[package]] name = "prost-build" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c55e02e35260070b6f716a2423c2ff1c3bb1642ddca6f99e1f26d06268a0e2d2" dependencies = [ "bytes", "heck", "itertools 0.11.0", "log", "multimap", "once_cell", "petgraph", "prettyplease", "prost 0.12.3", "prost-types", "regex", "syn 2.0.39", "tempfile", "which", ] [[package]] name = "prost-derive" version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "prost-derive" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" dependencies = [ "anyhow", "itertools 0.11.0", "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "prost-types" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" dependencies = [ "prost 0.12.3", ] [[package]] name = "quanta" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab" dependencies = [ "crossbeam-utils", "libc", "mach2", "once_cell", "raw-cpuid", 
"wasi", "web-sys", "winapi", ] [[package]] name = "quote" version = "1.0.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" dependencies = [ "proc-macro2", ] [[package]] name = "rand" version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", "rand_core", ] [[package]] name = "rand_core" version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom", ] [[package]] name = "ratatui" version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2e2e4cd95294a85c3b4446e63ef054eea43e0205b1fd60120c16b74ff7ff96ad" dependencies = [ "bitflags 2.4.1", "cassowary", "crossterm", "indoc", "itertools 0.11.0", "paste", "strum", "unicode-segmentation", "unicode-width", ] [[package]] name = "raw-cpuid" version = "10.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c297679cb867470fa8c9f67dbba74a78d78e3e98d7cf2b08d6d71540f797332" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "rayon" version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c27db03db7734835b3f53954b534c91069375ce6ccaa2e065441e07d9b6cdb1" dependencies = [ "either", "rayon-core", ] [[package]] name = "rayon-cond" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "059f538b55efd2309c9794130bc149c6a553db90e9d99c2030785c82f0bd7df9" dependencies = [ "either", "itertools 0.11.0", "rayon", ] [[package]] name = "rayon-core" version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ce3fb6ad83f861aac485e76e1985cd109d9a3713802152be56c3b1f0e0658ed" dependencies = [ "crossbeam-deque", "crossbeam-utils", ] [[package]] name = "redox_syscall" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_users" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" dependencies = [ "getrandom", "libredox", "thiserror", ] [[package]] name = "regex" version = "1.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" dependencies = [ "aho-corasick", "memchr", "regex-automata 0.4.3", "regex-syntax 0.8.2", ] [[package]] name = "regex-automata" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax 0.6.29", ] [[package]] name = "regex-automata" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" dependencies = [ "aho-corasick", "memchr", "regex-syntax 0.8.2", ] [[package]] name = 
"regex-syntax" version = "0.6.29" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" [[package]] name = "regex-syntax" version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" version = "0.11.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" dependencies = [ "base64 0.21.5", "bytes", "encoding_rs", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-tls", "ipnet", "js-sys", "log", "mime", "native-tls", "once_cell", "percent-encoding", "pin-project-lite", "serde", "serde_json", "serde_urlencoded", "system-configuration", "tokio", "tokio-native-tls", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "web-sys", "winreg", ] [[package]] name = "ring" version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" dependencies = [ "cc", "libc", "once_cell", "spin 0.5.2", "untrusted 0.7.1", "web-sys", "winapi", ] [[package]] name = "ring" version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "684d5e6e18f669ccebf64a92236bb7db9a34f07be010e3627368182027180866" dependencies = [ "cc", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", "windows-sys 0.48.0", ] [[package]] name = "rust-embed" version = "6.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a36224c3276f8c4ebc8c20f158eca7ca4359c8db89991c4925132aaaf6702661" dependencies = [ "rust-embed-impl", "rust-embed-utils", "walkdir", ] [[package]] name = "rust-embed-impl" version = "6.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49b94b81e5b2c284684141a2fb9e2a31be90638caf040bf9afbc5a0416afe1ac" dependencies = [ "proc-macro2", "quote", "rust-embed-utils", "shellexpand", "syn 2.0.39", "walkdir", ] [[package]] name = "rust-embed-utils" version = "7.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d38ff6bf570dc3bb7100fce9f7b60c33fa71d80e88da3f2580df4ff2bdded74" dependencies = [ "sha2", "walkdir", ] [[package]] name = "rustc-demangle" version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ "semver", ] [[package]] name = "rustix" version = "0.38.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc99bc2d4f1fed22595588a013687477aedf3cdcfb26558c559edb67b4d9b22e" dependencies = [ "bitflags 2.4.1", "errno", "libc", "linux-raw-sys", "windows-sys 0.48.0", ] [[package]] name = "rustls" version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" dependencies = [ "log", "ring 0.16.20", "sct", "webpki", ] [[package]] name = "rustls" version = 
"0.21.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" dependencies = [ "log", "ring 0.17.6", "rustls-webpki", "sct", ] [[package]] name = "rustls-pemfile" version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ "base64 0.21.5", ] [[package]] name = "rustls-webpki" version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ "ring 0.17.6", "untrusted 0.9.0", ] [[package]] name = "rustversion" version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" [[package]] name = "ryu" version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" [[package]] name = "same-file" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" dependencies = [ "winapi-util", ] [[package]] name = "schannel" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" dependencies = [ "windows-sys 0.48.0", ] [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sct" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ "ring 0.17.6", "untrusted 0.9.0", ] [[package]] name = "security-framework" version = "2.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" dependencies = [ "bitflags 1.3.2", "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", ] [[package]] name = "security-framework-sys" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "semver" version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" [[package]] name = "serde" version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" version = "1.0.193" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "serde_json" version = "1.0.108" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" dependencies = [ "itoa", "ryu", "serde", ] [[package]] name = "serde_path_to_error" version = "0.1.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" dependencies = [ "itoa", "serde", ] [[package]] name = "serde_urlencoded" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", "ryu", "serde", ] [[package]] name = "sha2" version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", "digest", ] [[package]] name = "sharded-slab" version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] [[package]] name = "shellexpand" version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ccc8076840c4da029af4f87e4e8daeb0fca6b87bbb02e10cb60b791450e11e4" dependencies = [ "dirs 4.0.0", ] [[package]] name = "signal-hook" version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" dependencies = [ "libc", "signal-hook-registry", ] [[package]] name = "signal-hook-mio" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29ad2e15f37ec9a6cc544097b78a1ec90001e9f71b81338ca39f430adaca99af" dependencies = [ "libc", "mio", "signal-hook", ] [[package]] name = "signal-hook-registry" version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" dependencies = [ "libc", ] [[package]] name = "sketches-ddsketch" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68a406c1882ed7f29cd5e248c9848a80e7cb6ae0fea82346d2746f2f941c07e1" [[package]] name = "slab" version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" dependencies = [ "autocfg", ] [[package]] name = "slotmap" version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1e08e261d0e8f5c43123b7adf3e4ca1690d655377ac93a03b2c9d3e98de1342" dependencies = [ "version_check", ] [[package]] name = "smallvec" version = "1.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dccd0940a2dcdf68d092b8cbab7dc0ad8fa938bf95787e1b916b0e3d0e8e970" [[package]] name = "socket2" version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", ] [[package]] name = "socket2" version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" dependencies = [ "libc", "windows-sys 0.48.0", ] [[package]] name = "spin" version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spin" version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = 
"spm_precompiled" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5851699c4033c63636f7ea4cf7b7c1f1bf06d0cc03cfb42e711de5a5c46cf326" dependencies = [ "base64 0.13.1", "nom", "serde", "unicode-segmentation", ] [[package]] name = "strsim" version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strum" version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" dependencies = [ "strum_macros", ] [[package]] name = "strum_macros" version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ "heck", "proc-macro2", "quote", "rustversion", "syn 2.0.39", ] [[package]] name = "syn" version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "syn" version = "2.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "sysinfo" version = "0.29.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cd727fc423c2060f6c92d9534cef765c65a6ed3f428a03d7def74a8c4348e666" dependencies = [ "cfg-if", "core-foundation-sys", "libc", "ntapi", "once_cell", "winapi", ] [[package]] name = "system-configuration" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", "system-configuration-sys", ] [[package]] name = "system-configuration-sys" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" dependencies = [ "core-foundation-sys", "libc", ] [[package]] name = "tabled" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfe9c3632da101aba5131ed63f9eed38665f8b3c68703a6bb18124835c1a5d22" dependencies = [ "papergrid", "tabled_derive", "unicode-width", ] [[package]] name = "tabled_derive" version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99f688a08b54f4f02f0a3c382aefdb7884d3d69609f785bd253dc033243e3fe4" dependencies = [ "heck", "proc-macro-error", "proc-macro2", "quote", "syn 1.0.109", ] [[package]] name = "tempfile" version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" dependencies = [ "cfg-if", "fastrand", "redox_syscall", "rustix", "windows-sys 0.48.0", ] [[package]] name = "text-generation-benchmark" version = "1.2.0" dependencies = [ "average", "clap", "crossterm", "float-ord", "hf-hub", "ratatui", "serde", "serde_json", "tabled", "text-generation-client", "thiserror", "tokenizers", "tokio", "tracing", 
"tracing-subscriber", ] [[package]] name = "text-generation-client" version = "1.2.0" dependencies = [ "futures", "grpc-metadata", "prost 0.12.3", "prost-build", "thiserror", "tokio", "tonic 0.10.2", "tonic-build", "tower", "tracing", ] [[package]] name = "text-generation-launcher" version = "1.2.0" dependencies = [ "clap", "ctrlc", "float_eq", "nix", "reqwest", "serde", "serde_json", "tracing", "tracing-subscriber", "vergen", ] [[package]] name = "text-generation-router" version = "1.2.0" dependencies = [ "async-stream", "axum", "axum-tracing-opentelemetry", "clap", "futures", "hf-hub", "init-tracing-opentelemetry", "metrics", "metrics-exporter-prometheus", "ngrok", "nohash-hasher", "opentelemetry", "opentelemetry-otlp", "rand", "reqwest", "serde", "serde_json", "text-generation-client", "thiserror", "tokenizers", "tokio", "tokio-stream", "tower-http", "tracing", "tracing-opentelemetry", "tracing-subscriber", "utoipa", "utoipa-swagger-ui", "vergen", ] [[package]] name = "thiserror" version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "thread_local" version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ "cfg-if", "once_cell", ] [[package]] name = "time" version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" dependencies = [ "deranged", "itoa", "libc", "num_threads", "powerfmt", "serde", "time-core", "time-macros", ] [[package]] name = "time-core" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" dependencies = [ "time-core", ] [[package]] name = "tinyvec" version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] [[package]] name = "tinyvec_macros" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokenizers" version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9be88c795d8b9f9c4002b3a8f26a6d0876103a6f523b32ea3bac52d8560c17c" dependencies = [ "aho-corasick", "clap", "derive_builder", "esaxx-rs", "getrandom", "hf-hub", "indicatif", "itertools 0.11.0", "lazy_static", "log", "macro_rules_attribute", "monostate", "onig", "paste", "rand", "rayon", "rayon-cond", "regex", "regex-syntax 0.7.5", "serde", "serde_json", "spm_precompiled", "thiserror", "unicode-normalization-alignments", "unicode-segmentation", "unicode_categories", ] [[package]] name = "tokio" version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2 0.5.5", "tokio-macros", "windows-sys 0.48.0", ] [[package]] name = "tokio-io-timeout" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" dependencies = [ "pin-project-lite", "tokio", ] [[package]] name = "tokio-macros" version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "tokio-native-tls" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbae76ab933c85776efabc971569dd6119c580d8f5d448769dec1764bf796ef2" dependencies = [ "native-tls", "tokio", ] [[package]] name = "tokio-retry" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ "pin-project", "rand", "tokio", ] [[package]] name = "tokio-stream" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" dependencies = [ "futures-core", "pin-project-lite", "tokio", ] [[package]] name = "tokio-util" version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", "pin-project-lite", "tokio", "tracing", ] [[package]] name = "tonic" version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" dependencies = [ "async-trait", "axum", "base64 0.21.5", "bytes", "futures-core", "futures-util", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost 0.11.9", "tokio", "tokio-stream", "tower", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tonic" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" dependencies = [ "async-stream", "async-trait", "axum", "base64 0.21.5", "bytes", "h2", "http", "http-body", "hyper", "hyper-timeout", "percent-encoding", "pin-project", "prost 0.12.3", "tokio", "tokio-stream", "tower", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tonic-build" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" dependencies = [ "prettyplease", "proc-macro2", "prost-build", "quote", "syn 2.0.39", ] [[package]] name = "tower" version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "indexmap 1.9.3", "pin-project", "pin-project-lite", "rand", "slab", "tokio", "tokio-util", "tower-layer", "tower-service", "tracing", ] [[package]] name = "tower-http" version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ "bitflags 2.4.1", "bytes", "futures-core", "futures-util", "http", "http-body", "http-range-header", "pin-project-lite", "tower-layer", "tower-service", ] [[package]] name = "tower-layer" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" [[package]] name = "tower-service" version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "tracing-core" version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", ] [[package]] name = "tracing-log" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" dependencies = [ "log", "once_cell", "tracing-core", ] [[package]] name = "tracing-log" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ "log", "once_cell", "tracing-core", ] [[package]] name = "tracing-opentelemetry" version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75327c6b667828ddc28f5e3f169036cb793c3f588d83bf0f262a7f062ffed3c8" dependencies = [ "once_cell", "opentelemetry", "opentelemetry_sdk", "smallvec", "tracing", "tracing-core", "tracing-log 0.1.4", "tracing-subscriber", ] [[package]] name = "tracing-opentelemetry-instrumentation-sdk" version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f523eba1b52bb854b804d43a039aafeaee5a623015065adbfef8016825319c15" dependencies = [ "http", "opentelemetry-http", "opentelemetry_api", "tracing", "tracing-opentelemetry", ] [[package]] name = "tracing-serde" version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", ] [[package]] name = "tracing-subscriber" version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", "once_cell", "regex", "serde", "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log 0.2.0", "tracing-serde", ] [[package]] name = "try-lock" version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" [[package]] name = "typenum" version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] name = "unicase" version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-normalization-alignments" version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "43f613e4fa046e69818dd287fdc4bc78175ff20331479dab6e1b0f98d57062de" dependencies = [ "smallvec", ] [[package]] name = "unicode-segmentation" version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" [[package]] name = "unicode-width" version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" [[package]] name = "unicode_categories" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" [[package]] name = "untrusted" version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "untrusted" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8cdd25c339e200129fe4de81451814e5228c9b771d57378817d6117cc2b3f97" dependencies = [ "base64 0.21.5", "flate2", "log", "native-tls", "once_cell", "rustls 0.21.9", "rustls-webpki", "serde", "serde_json", "url", "webpki-roots", ] [[package]] name = "url" version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", "idna", "percent-encoding", ] [[package]] name = "urlencoding" version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "utf8parse" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "utoipa" version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d82b1bc5417102a73e8464c686eef947bdfb99fcdfc0a4f228e81afa9526470a" dependencies = [ "indexmap 2.1.0", "serde", "serde_json", "utoipa-gen", ] [[package]] name = "utoipa-gen" version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"05d96dcd6fc96f3df9b3280ef480770af1b7c5d14bc55192baa9b067976d920c" dependencies = [ "proc-macro-error", "proc-macro2", "quote", "regex", "syn 2.0.39", ] [[package]] name = "utoipa-swagger-ui" version = "3.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84614caa239fb25b2bb373a52859ffd94605ceb256eeb1d63436325cf81e3653" dependencies = [ "axum", "mime_guess", "regex", "rust-embed", "serde", "serde_json", "utoipa", "zip", ] [[package]] name = "valuable" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "vcpkg" version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "vergen" version = "8.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1290fd64cc4e7d3c9b07d7f333ce0ce0007253e32870e632624835cc80b83939" dependencies = [ "anyhow", "rustc_version", "rustversion", "sysinfo", "time", ] [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "walkdir" version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" dependencies = [ "same-file", "winapi-util", ] [[package]] name = "want" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" dependencies = [ "try-lock", ] [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ed0d4f68a3015cc185aff4db9506a015f4b96f95303897bfa23f846db54064e" dependencies = [ "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b56f625e64f3a1084ded111c4d5f477df9f8c92df113852fa5a374dbda78826" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", "syn 2.0.39", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" version = "0.4.39" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac36a15a220124ac510204aec1c3e5db8a22ab06fd6706d881dc6149f8ed9a12" dependencies = [ "cfg-if", "js-sys", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0162dbf37223cd2afce98f3d0785506dcb8d266223983e4b5b525859e6e182b2" dependencies = [ "quote", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" version = "0.2.89" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7ab9b36309365056cd639da3134bf87fa8f3d86008abf99e612384a6eecd459f" [[package]] name = "web-sys" version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50c24a44ec86bb68fbecd1b3efed7e85ea5621b39b35ef2766b66cd984f8010f" dependencies = [ "js-sys", "wasm-bindgen", ] [[package]] name = "webpki" version = "0.22.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed63aea5ce73d0ff405984102c42de94fc55a6b75765d621c65262469b3c9b53" dependencies = [ "ring 0.17.6", "untrusted 0.9.0", ] [[package]] name = "webpki-roots" version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1778a42e8b3b90bff8d0f5032bf22250792889a5cdc752aa0020c84abe3aaf10" [[package]] name = "which" version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", "home", "once_cell", "rustix", ] [[package]] name = "winapi" version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" dependencies = [ "winapi-i686-pc-windows-gnu", "winapi-x86_64-pc-windows-gnu", ] [[package]] name = "winapi-i686-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" dependencies = [ "winapi", ] [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-sys" version = "0.45.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" dependencies = [ "windows-targets 0.42.2", ] [[package]] name = "windows-sys" version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ "windows-targets 0.48.5", ] [[package]] name = "windows-sys" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ "windows-targets 0.52.0", ] [[package]] name = "windows-targets" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" dependencies = [ "windows_aarch64_gnullvm 0.42.2", "windows_aarch64_msvc 0.42.2", "windows_i686_gnu 0.42.2", "windows_i686_msvc 0.42.2", "windows_x86_64_gnu 0.42.2", "windows_x86_64_gnullvm 0.42.2", "windows_x86_64_msvc 0.42.2", ] [[package]] name = "windows-targets" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ "windows_aarch64_gnullvm 0.48.5", "windows_aarch64_msvc 0.48.5", "windows_i686_gnu 0.48.5", "windows_i686_msvc 0.48.5", "windows_x86_64_gnu 0.48.5", "windows_x86_64_gnullvm 0.48.5", "windows_x86_64_msvc 0.48.5", ] [[package]] name = "windows-targets" version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" dependencies = [ "windows_aarch64_gnullvm 0.52.0", "windows_aarch64_msvc 0.52.0", "windows_i686_gnu 0.52.0", "windows_i686_msvc 0.52.0", "windows_x86_64_gnu 0.52.0", "windows_x86_64_gnullvm 0.52.0", "windows_x86_64_msvc 0.52.0", ] [[package]] name = "windows_aarch64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" [[package]] name = "windows_aarch64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" [[package]] name = "windows_i686_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" [[package]] name = "windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" [[package]] name = "windows_i686_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" [[package]] name = "windows_x86_64_gnu" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" [[package]] name = "windows_x86_64_gnullvm" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" [[package]] name = "windows_x86_64_msvc" version = "0.42.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winreg" version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", "windows-sys 0.48.0", ] [[package]] name = "zerocopy" version = "0.7.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e97e415490559a91254a2979b4829267a57d2fcd741a98eee8b722fb57289aa0" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" version = "0.7.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dd7e48ccf166952882ca8bd778a43502c64f33bf94c12ebe2a7f08e5a0f6689f" dependencies = [ "proc-macro2", "quote", "syn 2.0.39", ] [[package]] name = "zip" version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "760394e246e4c28189f19d488c058bf16f564016aefac5d32bb1f3b51d5e9261" dependencies = [ "byteorder", "crc32fast", "crossbeam-utils", "flate2", ]
0
hf_public_repos
hf_public_repos/text-generation-inference/README.md
<div align="center"> <a href="https://www.youtube.com/watch?v=jlMAX2Oaht0"> <img width=560 width=315 alt="Making TGI deployment optimal" src="https://huggingface.co/datasets/Narsil/tgi_assets/resolve/main/thumbnail.png"> </a> # Text Generation Inference <a href="https://github.com/huggingface/text-generation-inference"> <img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/huggingface/text-generation-inference?style=social"> </a> <a href="https://huggingface.github.io/text-generation-inference"> <img alt="Swagger API documentation" src="https://img.shields.io/badge/API-Swagger-informational"> </a> A Rust, Python and gRPC server for text generation inference. Used in production at [HuggingFace](https://huggingface.co) to power Hugging Chat, the Inference API and Inference Endpoint. </div> ## Table of contents - [Get Started](#get-started) - [API Documentation](#api-documentation) - [Using a private or gated model](#using-a-private-or-gated-model) - [A note on Shared Memory](#a-note-on-shared-memory-shm) - [Distributed Tracing](#distributed-tracing) - [Local Install](#local-install) - [CUDA Kernels](#cuda-kernels) - [Optimized architectures](#optimized-architectures) - [Run Falcon](#run-falcon) - [Run](#run) - [Quantization](#quantization) - [Develop](#develop) - [Testing](#testing) Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and [more](https://huggingface.co/docs/text-generation-inference/supported_models). TGI implements many features, such as: - Simple launcher to serve most popular LLMs - Production ready (distributed tracing with Open Telemetry, Prometheus metrics) - Tensor Parallelism for faster inference on multiple GPUs - Token streaming using Server-Sent Events (SSE) - Continuous batching of incoming requests for increased total throughput - Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures - Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323) - [Safetensors](https://github.com/huggingface/safetensors) weight loading - Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) - Logits warper (temperature scaling, top-p, top-k, repetition penalty, more details see [transformers.LogitsProcessor](https://huggingface.co/docs/transformers/internal/generation_utils#transformers.LogitsProcessor)) - Stop sequences - Log probabilities - Custom Prompt Generation: Easily generate text by providing custom prompts to guide the model's output - Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance ## Get Started ### Docker For a detailed starting guide, please see the [Quick Tour](https://huggingface.co/docs/text-generation-inference/quicktour). 
The easiest way of getting started is using the official Docker container: ```shell model=HuggingFaceH4/zephyr-7b-beta volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2 --model-id $model ``` And then you can make requests like ```bash curl 127.0.0.1:8080/generate \ -X POST \ -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \ -H 'Content-Type: application/json' ``` **Note:** To use NVIDIA GPUs, you need to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). We also recommend using NVIDIA drivers with CUDA version 11.8 or higher. To run the Docker container on a machine with no GPUs or CUDA support, it is enough to remove the `--gpus all` flag and add `--disable-custom-kernels`; please note that CPU is not the intended platform for this project, so performance might be subpar. **Note:** TGI supports AMD Instinct MI210 and MI250 GPUs. Details can be found in the [Supported Hardware documentation](https://huggingface.co/docs/text-generation-inference/supported_models#supported-hardware). To use AMD GPUs, please use `docker run --device /dev/kfd --device /dev/dri --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2-rocm --model-id $model` instead of the command above. To see all options to serve your models (in the [code](https://github.com/huggingface/text-generation-inference/blob/main/launcher/src/main.rs) or in the CLI): ``` text-generation-launcher --help ``` ### API documentation You can consult the OpenAPI documentation of the `text-generation-inference` REST API using the `/docs` route. The Swagger UI is also available at: [https://huggingface.github.io/text-generation-inference](https://huggingface.github.io/text-generation-inference). ### Using a private or gated model You can use the `HUGGING_FACE_HUB_TOKEN` environment variable to configure the token used by `text-generation-inference`, which gives you access to protected resources. For example, if you want to serve the gated Llama V2 model variants: 1. Go to https://huggingface.co/settings/tokens 2. Copy your CLI READ token 3. Export `HUGGING_FACE_HUB_TOKEN=<your CLI READ token>` or with Docker: ```shell model=meta-llama/Llama-2-7b-chat-hf volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run token=<your CLI READ token> docker run --gpus all --shm-size 1g -e HUGGING_FACE_HUB_TOKEN=$token -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:1.2 --model-id $model ``` ### A note on Shared Memory (shm) [`NCCL`](https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/index.html) is a communication framework used by `PyTorch` to do distributed training/inference. `text-generation-inference` makes use of `NCCL` to enable Tensor Parallelism, which dramatically speeds up inference for large language models. In order to share data between the different devices of an `NCCL` group, `NCCL` might fall back to using the host memory if peer-to-peer using NVLink or PCI is not possible. To allow the container to use 1G of Shared Memory and support SHM sharing, we add `--shm-size 1g` to the above command. 
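The `--shm-size 1g` flag mostly matters once a model is sharded across several GPUs with Tensor Parallelism, since that is when `NCCL` traffic may go through shared memory. As a hedged sketch (the `--num-shard` launcher flag is an assumption here, not something stated in this section; check `text-generation-launcher --help` for the authoritative flag list), a two-GPU launch might look like:

```shell
# Illustrative multi-GPU launch: shard the model over 2 GPUs (tensor parallelism).
# NCCL coordinates the shards, which is why the enlarged /dev/shm matters here.
model=HuggingFaceH4/zephyr-7b-beta
volume=$PWD/data # reuse the same weights cache as above

docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
  ghcr.io/huggingface/text-generation-inference:1.2 \
  --model-id $model \
  --num-shard 2
```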
You can also add Shared Memory to the container by creating a volume with: ```yaml - name: shm emptyDir: medium: Memory sizeLimit: 1Gi ``` and mounting it to `/dev/shm`. Finally, you can also disable SHM sharing by using the `NCCL_SHM_DISABLE=1` environment variable. However, note that this will impact performance. ### Distributed Tracing `text-generation-inference` is instrumented with distributed tracing using OpenTelemetry. You can use this feature by setting the address to an OTLP collector with the `--otlp-endpoint` argument. ### Architecture ![TGI architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/TGI.png) ### Local install You can also opt to install `text-generation-inference` locally. First [install Rust](https://rustup.rs/) and create a Python virtual environment with at least Python 3.9, e.g. using `conda`: ```shell curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh conda create -n text-generation-inference python=3.9 conda activate text-generation-inference ``` You may also need to install Protoc. On Linux: ```shell PROTOC_ZIP=protoc-21.12-linux-x86_64.zip curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v21.12/$PROTOC_ZIP sudo unzip -o $PROTOC_ZIP -d /usr/local bin/protoc sudo unzip -o $PROTOC_ZIP -d /usr/local 'include/*' rm -f $PROTOC_ZIP ``` On MacOS, using Homebrew: ```shell brew install protobuf ``` Then run: ```shell BUILD_EXTENSIONS=True make install # Install repository and HF/transformer fork with CUDA kernels make run-falcon-7b-instruct ``` **Note:** on some machines, you may also need the OpenSSL libraries and gcc. On Linux machines, run: ```shell sudo apt-get install libssl-dev gcc -y ``` ### CUDA Kernels The custom CUDA kernels are only tested on NVIDIA A100, AMD MI210 and AMD MI250. If you have any installation or runtime issues, you can remove the kernels by using the `DISABLE_CUSTOM_KERNELS=True` environment variable. Be aware that the official Docker image has them enabled by default. ## Optimized architectures TGI works out of the box to serve optimized models in [this list](https://huggingface.co/docs/text-generation-inference/supported_models). Other architectures are supported on a best-effort basis using: `AutoModelForCausalLM.from_pretrained(<model>, device_map="auto")` or `AutoModelForSeq2SeqLM.from_pretrained(<model>, device_map="auto")` ## Run Falcon ### Run ```shell make run-falcon-7b-instruct ``` ### Quantization You can also quantize the weights with bitsandbytes to reduce the VRAM requirement: ```shell make run-falcon-7b-instruct-quantize ``` 4bit quantization is available using the [NF4 and FP4 data types from bitsandbytes](https://arxiv.org/pdf/2305.14314.pdf). It can be enabled by providing `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` as a command line argument to `text-generation-launcher`. ## Develop ```shell make server-dev make router-dev ``` ## Testing ```shell # python make python-server-tests make python-client-tests # or both server and client tests make python-tests # rust cargo tests make rust-tests # integration tests make integration-tests ```
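Putting the pieces above together, here is a minimal sketch of a quantized deployment followed by a request. The image, port, model placeholder, and `--quantize` flag are all taken from the sections above; adapt them to your own setup:

```shell
model=HuggingFaceH4/zephyr-7b-beta
volume=$PWD/data # share a volume with the Docker container to avoid downloading weights every run

# Serve the model with 4-bit NF4 quantization
# (arguments after the image name are passed to text-generation-launcher)
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data \
    ghcr.io/huggingface/text-generation-inference:1.2 \
    --model-id $model --quantize bitsandbytes-nf4

# In another shell, query the server exactly as in the Get Started section
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```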
0
hf_public_repos
hf_public_repos/text-generation-inference/.dockerignore
aml target server/transformers server/flash-attention
0
hf_public_repos
hf_public_repos/text-generation-inference/rust-toolchain.toml
[toolchain] channel = "1.70.0" components = ["rustfmt", "clippy"]
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/Cargo.toml
[package] name = "text-generation-benchmark" description = "Text Generation Benchmarking tool" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [lib] path = "src/lib.rs" [[bin]] name = "text-generation-benchmark" path = "src/main.rs" [dependencies] average = "0.14" clap = { version = "4.4.5", features = ["derive", "env"] } crossterm = "0.27" float-ord = "0.3.2" serde = {version = "1.0.188", features = ["derive"]} serde_json = "1.0" tabled = "0.14.0" text-generation-client = { path = "../router/client" } thiserror = "1.0.48" tokenizers = { version = "0.14.0", features = ["http"] } tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync", "macros"] } tui = {package = "ratatui", version = "0.23", default-features = false, features = ["crossterm"]} tracing = "0.1.37" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } hf-hub = "0.3.1"
0
hf_public_repos/text-generation-inference
hf_public_repos/text-generation-inference/benchmark/README.md
<div align="center"> # Text Generation Inference benchmarking tool ![benchmark](../assets/benchmark.png) </div> A lightweight benchmarking tool based inspired by [oha](https://github.com/hatoo/oha) and powered by [tui](https://github.com/tui-rs-revival/ratatui). ## Install ```shell make install-benchmark ``` ## Run First, start `text-generation-inference`: ```shell text-generation-launcher --model-id bigscience/bloom-560m ``` Then run the benchmarking tool: ```shell text-generation-benchmark --tokenizer-name bigscience/bloom-560m ```
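The sweep and generation parameters can also be set from the command line. As a sketch, a more explicit invocation might look like the following; the flags are the ones declared in `src/main.rs`, and the values shown are only illustrative (they match the tool's defaults):

```shell
text-generation-benchmark \
    --tokenizer-name bigscience/bloom-560m \
    --batch-size 1 --batch-size 2 --batch-size 4 \
    --sequence-length 10 \
    --decode-length 8 \
    --runs 10 \
    --warmups 1
```

On exit, the tool prints the parameter, latency, and throughput tables built in `src/table.rs`.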
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/generation.rs
use std::time::{Duration, Instant}; use text_generation_client::{ Batch, CachedBatch, ClientError, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters, }; use tokenizers::{Tokenizer, TruncationDirection}; use tokio::sync::{broadcast, mpsc}; const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; #[derive(Debug, Clone)] pub(crate) struct Prefill { pub(crate) latency: Duration, pub(crate) throughput: f64, } #[derive(Debug, Clone)] pub(crate) struct Decode { pub(crate) latency: Duration, pub(crate) token_latency: Duration, pub(crate) throughput: f64, } #[derive(Debug)] pub(crate) enum Message { Warmup, Prefill(Prefill), Decode(Decode), EndRun, EndBatch, } /// Benchmarking task #[allow(clippy::too_many_arguments)] pub(crate) async fn generation_task( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone()) => { if let Err(err) = res { run_sender.send(Err(err)).await.unwrap_or(()); } }, _ = shutdown_receiver.recv() => {} } } /// Benchmark prefill/decode #[allow(clippy::too_many_arguments)] async fn generate_runs( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, mut client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, ) -> Result<(), ClientError> { // Create a dummy sequence let sequence = create_sequence(sequence_length, tokenizer); for b in batch_size { // Warmups on batch size for _ in 0..warmups { let (_, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), top_n_tokens, &mut client, ) .await?; let _ = decode(decode_batch, &mut client).await?; // Send warmup message run_sender.send(Ok(Message::Warmup)).await.unwrap_or(()); } for _ in 0..n_runs { let (prefill, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), top_n_tokens, &mut client, ) .await?; // Send prefill message run_sender .send(Ok(Message::Prefill(prefill))) .await .unwrap_or(()); let decode = decode(decode_batch, &mut client).await?; // Send decode message run_sender .send(Ok(Message::Decode(decode))) .await .unwrap_or(()); // Send run ended message run_sender.send(Ok(Message::EndRun)).await.unwrap_or(()); } // Batch ended run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(()); } Ok(()) } // Run a prefill step async fn prefill( sequence: String, sequence_length: u32, batch_size: u32, decode_length: u32, parameters: NextTokenChooserParameters, top_n_tokens: 
Option<u32>, client: &mut ShardedClient, ) -> Result<(Prefill, CachedBatch), ClientError> { // Create requests let requests = (0..batch_size) .map(|id| Request { id: id.into(), prefill_logprobs: false, inputs: sequence.clone(), truncate: sequence_length, parameters: Some(parameters.clone()), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens: decode_length, stop_sequences: vec![], ignore_eos_token: true, // Will not stop even if a eos token is generated }), top_n_tokens: top_n_tokens.unwrap_or(0), }) .collect(); let batch = Batch { id: 0, requests, size: batch_size, max_tokens: batch_size * (sequence_length + decode_length), }; // Run prefill let start_time = Instant::now(); let (_, decode_batch) = client.prefill(batch.clone()).await?; // Get latency let latency = start_time.elapsed(); // Compute throughput from latency and batch size let throughput = batch_size as f64 / latency.as_secs_f64(); // Decode batch cannot be empty let decode_batch = decode_batch.expect("decode_batch is None. This is a bug."); let step = Prefill { latency, throughput, }; Ok((step, decode_batch)) } /// Run a full decode async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> { let mut decode_length = 0; let batch_size = batch.size; let start_time = Instant::now(); // Full decode over decode length let mut next_batch = Some(batch); while let Some(batch) = next_batch { let result = client.decode(vec![batch]).await?; next_batch = result.1; decode_length += 1; } // Get latency let latency = start_time.elapsed(); let token_latency = latency / decode_length; // Compute throughput from latency, batch size and decode length let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64(); let step = Decode { latency, token_latency, throughput, }; Ok(step) } /// Create a dummy sequence of the correct length fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String { let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len(); // Repeat lorem ipsum to cover sequence length let string_sequence = LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len()); // Encode sequence let mut encoding = tokenizer.encode(string_sequence, true).unwrap(); // Truncate to sequence_length encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left); // Decode tokenizer.decode(encoding.get_ids(), false).unwrap() }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/lib.rs
mod app; mod event; mod generation; mod table; mod utils; use crate::app::App; use crate::event::Event; use crossterm::ExecutableCommand; use std::io; use text_generation_client::{NextTokenChooserParameters, ShardedClient}; use tokenizers::Tokenizer; use tokio::sync::{broadcast, mpsc}; use tui::backend::CrosstermBackend; use tui::Terminal; /// Run benchmarking app #[allow(clippy::too_many_arguments)] pub async fn run( tokenizer_name: String, tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, watermark: bool, do_sample: bool, client: ShardedClient, ) -> Result<(), std::io::Error> { let parameters = NextTokenChooserParameters { temperature: temperature.unwrap_or(1.0), top_k: top_k.unwrap_or(0), top_p: top_p.unwrap_or(1.0), typical_p: typical_p.unwrap_or(1.0), do_sample, seed: 0, repetition_penalty: repetition_penalty.unwrap_or(1.0), watermark, }; // Initialize terminal properties crossterm::terminal::enable_raw_mode()?; io::stdout().execute(crossterm::terminal::EnterAlternateScreen)?; io::stdout().execute(crossterm::cursor::Hide)?; // Initialize terminal let mut terminal = { let backend = CrosstermBackend::new(io::stdout()); Terminal::new(backend)? }; // Create message channel between generation_task and app let (run_sender, run_receiver) = mpsc::channel(8); // Crossterm event channel let (event_sender, mut event_receiver) = mpsc::channel(8); // Shutdown channel to terminate tasks let (shutdown_sender, _) = broadcast::channel(1); // Channel to check if tasks terminated let (shutdown_guard_sender, mut shutdown_guard_receiver) = mpsc::channel(1); // Create generation task tokio::spawn(generation::generation_task( tokenizer, batch_size.clone(), sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Create event task tokio::spawn(event::terminal_event_task( 250, event_sender, shutdown_sender.subscribe(), shutdown_guard_sender.clone(), )); // Drop our end of shutdown sender drop(shutdown_guard_sender); // Create App let mut app = App::new( run_receiver, tokenizer_name.clone(), sequence_length, decode_length, n_runs, batch_size, ); while app.running { // Draw frame terminal.draw(|frame| app.render(frame))?; // Await a new event from event handling task match event_receiver.recv().await { None => break, // Update app state Some(event) => match event { Event::Tick => app.tick(), Event::Key(key_event) => app.handle_key_event(key_event), _ => {} }, } } // Ask tasks to shutdown let _ = shutdown_sender.send(()); // Wait for tasks to shutdown let _ = shutdown_guard_receiver.recv().await; // Revert terminal to original view io::stdout().execute(crossterm::terminal::LeaveAlternateScreen)?; crossterm::terminal::disable_raw_mode()?; io::stdout().execute(crossterm::cursor::Show)?; let parameters_table = table::parameters_table( tokenizer_name, sequence_length, decode_length, top_n_tokens, n_runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, ); println!("\n{parameters_table}\n"); let latency_table = table::latency_table(&app.data); println!("\n{latency_table}\n"); let throughput_table = table::throughput_table(&app.data); println!("\n{throughput_table}\n"); Ok(()) }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/event.rs
/// Inspired by https://github.com/orhun/rust-tui-template/blob/472aa515119d4c94903eac12d9784417281dc7f5/src/event.rs use crossterm::event; use std::time::{Duration, Instant}; use tokio::sync::{broadcast, mpsc}; /// Events #[derive(Debug)] pub(crate) enum Event { /// Terminal tick. Tick, /// Key press. Key(event::KeyEvent), /// Terminal resize. Resize(u16, u16), } pub(crate) async fn terminal_event_task( fps: u32, event_sender: mpsc::Sender<Event>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { _ = event_loop(fps, event_sender) => { }, _ = shutdown_receiver.recv() => {} } } /// Main event loop async fn event_loop(fps: u32, event_sender: mpsc::Sender<Event>) { // Frame budget let per_frame = Duration::from_secs(1) / fps; // When was last frame executed let mut last_frame = Instant::now(); loop { // Sleep to avoid blocking the thread for too long if let Some(sleep) = per_frame.checked_sub(last_frame.elapsed()) { tokio::time::sleep(sleep).await; } // Get crossterm event and send a new one over the channel if event::poll(Duration::from_secs(0)).expect("no events available") { match event::read().expect("unable to read event") { event::Event::Key(e) => event_sender.send(Event::Key(e)).await.unwrap_or(()), event::Event::Resize(w, h) => { event_sender.send(Event::Resize(w, h)).await.unwrap_or(()) } _ => (), } } // Frame budget exceeded if last_frame.elapsed() >= per_frame { // Send tick event_sender.send(Event::Tick).await.unwrap_or(()); // Reset last_frame time last_frame = Instant::now(); } } }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/table.rs
use crate::app::Data; use tabled::settings::Merge; use tabled::{builder::Builder, settings::Style, Table}; #[allow(clippy::too_many_arguments)] pub(crate) fn parameters_table( tokenizer_name: String, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, temperature: Option<f32>, top_k: Option<u32>, top_p: Option<f32>, typical_p: Option<f32>, repetition_penalty: Option<f32>, watermark: bool, do_sample: bool, ) -> Table { let mut builder = Builder::default(); builder.set_header(["Parameter", "Value"]); builder.push_record(["Model", &tokenizer_name]); builder.push_record(["Sequence Length", &sequence_length.to_string()]); builder.push_record(["Decode Length", &decode_length.to_string()]); builder.push_record(["Top N Tokens", &format!("{top_n_tokens:?}")]); builder.push_record(["N Runs", &n_runs.to_string()]); builder.push_record(["Warmups", &warmups.to_string()]); builder.push_record(["Temperature", &format!("{temperature:?}")]); builder.push_record(["Top K", &format!("{top_k:?}")]); builder.push_record(["Top P", &format!("{top_p:?}")]); builder.push_record(["Typical P", &format!("{typical_p:?}")]); builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]); builder.push_record(["Watermark", &watermark.to_string()]); builder.push_record(["Do Sample", &do_sample.to_string()]); let mut table = builder.build(); table.with(Style::markdown()); table } pub(crate) fn latency_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header([ "Step", "Batch Size", "Average", "Lowest", "Highest", "p50", "p90", "p99", ]); add_latencies( &mut builder, "Prefill", &data.batch_size, &data.prefill_latencies, ); add_latencies( &mut builder, "Decode (token)", &data.batch_size, &data.decode_token_latencies, ); add_latencies( &mut builder, "Decode (total)", &data.batch_size, &data.decode_latencies, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } pub(crate) fn throughput_table(data: &Data) -> Table { let mut builder = Builder::default(); builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]); add_throuhgputs( &mut builder, "Prefill", &data.batch_size, &data.prefill_throughputs, ); add_throuhgputs( &mut builder, "Decode", &data.batch_size, &data.decode_throughputs, ); let mut table = builder.build(); table.with(Style::markdown()).with(Merge::vertical()); table } fn add_latencies( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_latencies: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let latencies = &batch_latencies[i]; let (avg, min, max) = avg_min_max(latencies); let row = [ step, &b.to_string(), &format_value(avg, "ms"), &format_value(min, "ms"), &format_value(max, "ms"), &format_value(px(latencies, 50), "ms"), &format_value(px(latencies, 90), "ms"), &format_value(px(latencies, 99), "ms"), ]; builder.push_record(row); } } fn add_throuhgputs( builder: &mut Builder, step: &'static str, batch_size: &[u32], batch_throughputs: &[Vec<f64>], ) { for (i, b) in batch_size.iter().enumerate() { let throughputs = &batch_throughputs[i]; let (avg, min, max) = avg_min_max(throughputs); let row = [ step, &b.to_string(), &format_value(avg, "tokens/secs"), &format_value(min, "tokens/secs"), &format_value(max, "tokens/secs"), ]; builder.push_record(row); } } fn avg_min_max(data: &Vec<f64>) -> (f64, f64, f64) { let average = data.iter().sum::<f64>() / data.len() as f64; let min = data .iter() .min_by(|a, b| a.total_cmp(b)) 
.unwrap_or(&std::f64::NAN); let max = data .iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); (average, *min, *max) } fn px(data: &Vec<f64>, p: u32) -> f64 { let i = (f64::from(p) / 100.0 * data.len() as f64) as usize; *data.get(i).unwrap_or(&std::f64::NAN) } fn format_value(value: f64, unit: &'static str) -> String { format!("{:.2} {unit}", value) }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/main.rs
/// Text Generation Inference benchmarking tool /// /// Inspired by the great Oha app: https://github.com/hatoo/oha /// and: https://github.com/orhun/rust-tui-template use clap::Parser; use std::path::Path; use text_generation_client::ShardedClient; use tokenizers::{FromPretrainedParameters, Tokenizer}; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::EnvFilter; /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the tokenizer (as in model_id on the huggingface hub, or local path). #[clap(short, long, env)] tokenizer_name: String, /// The revision to use for the tokenizer if on the hub. #[clap(default_value = "main", long, env)] revision: String, /// The various batch sizes to benchmark. The idea is to use enough /// batching to start seeing increased latency; this usually means you're /// moving from memory bound (usual at BS=1) to compute bound, which is /// a sweet spot for the maximum batch size for the model under test #[clap(short, long)] batch_size: Option<Vec<u32>>, /// This is the length, in tokens, of the initial prompt sent to the /// text-generation-server. A longer prompt will slow down the benchmark. Usually the /// latency grows somewhat linearly with this for the prefill step. /// /// Most importantly, the prefill step is usually not the one dominating /// your runtime, so it's ok to keep it short. #[clap(default_value = "10", short, long, env)] sequence_length: u32, /// This is how many tokens will be generated by the server and averaged out /// to give the `decode` latency. This is the *critical* number you want to optimize for, /// as LLMs spend most of their time doing decoding. /// /// Decode latency is usually quite stable. #[clap(default_value = "8", short, long, env)] decode_length: u32, /// How many runs should we average over #[clap(default_value = "10", short, long, env)] runs: usize, /// Number of warmup cycles #[clap(default_value = "1", short, long, env)] warmups: usize, /// The location of the grpc socket.
This benchmark tool bypasses the router /// completely and directly talks to the gRPC processes #[clap(default_value = "/tmp/text-generation-server-0", short, long, env)] master_shard_uds_path: String, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] temperature: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_k: Option<u32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] typical_p: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] repetition_penalty: Option<f32>, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] watermark: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] do_sample: bool, /// Generation parameter in case you want to specifically test/debug particular /// decoding strategies, for full doc refer to the `text-generation-server` #[clap(long, env)] top_n_tokens: Option<u32>, } fn main() -> Result<(), Box<dyn std::error::Error>> { init_logging(); // Get args let args = Args::parse(); // Pattern match configuration let Args { tokenizer_name, revision, batch_size, sequence_length, decode_length, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, master_shard_uds_path, top_n_tokens, } = args; let batch_size = batch_size.unwrap_or(vec![1, 2, 4, 8, 16, 32]); // Tokenizer instance // This will only be used to validate payloads tracing::info!("Loading tokenizer"); let local_path = Path::new(&tokenizer_name); let tokenizer = if local_path.exists() && local_path.is_dir() && local_path.join("tokenizer.json").exists() { // Load local tokenizer tracing::info!("Found local tokenizer"); Tokenizer::from_file(local_path.join("tokenizer.json")).unwrap() } else { tracing::info!("Downloading tokenizer"); // Parse Huggingface hub token let auth_token = std::env::var("HUGGING_FACE_HUB_TOKEN").ok(); // Download and instantiate tokenizer // We need to download it outside of the Tokio runtime let params = FromPretrainedParameters { revision, auth_token, ..Default::default() }; Tokenizer::from_pretrained(tokenizer_name.clone(), Some(params)).unwrap() }; tracing::info!("Tokenizer loaded"); // Launch Tokio runtime tokio::runtime::Builder::new_multi_thread() .enable_all() .build() .unwrap() .block_on(async { // Instantiate sharded client from the master unix socket tracing::info!("Connect to model server"); let mut sharded_client = ShardedClient::connect_uds(master_shard_uds_path) .await .expect("Could not connect to server"); // Clear the cache; useful if the webserver rebooted sharded_client .clear_cache(None) .await .expect("Unable to clear cache"); tracing::info!("Connected"); // Run app 
text_generation_benchmark::run( tokenizer_name, tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, runs, warmups, temperature, top_k, top_p, typical_p, repetition_penalty, watermark, do_sample, sharded_client, ) .await .unwrap(); }); Ok(()) } /// Init logging using LOG_LEVEL fn init_logging() { // STDOUT/STDERR layer let fmt_layer = tracing_subscriber::fmt::layer() .with_file(true) .with_line_number(true); // Filter events with LOG_LEVEL let env_filter = EnvFilter::try_from_env("LOG_LEVEL").unwrap_or_else(|_| EnvFilter::new("info")); tracing_subscriber::registry() .with(env_filter) .with(fmt_layer) .init(); }
0
hf_public_repos/text-generation-inference/benchmark
hf_public_repos/text-generation-inference/benchmark/src/app.rs
/// Inspired by https://github.com/hatoo/oha/blob/bb989ea3cd77727e7743e7daa60a19894bb5e901/src/monitor.rs use crate::generation::{Decode, Message, Prefill}; use crossterm::event::{KeyCode, KeyEvent, KeyModifiers}; use text_generation_client::ClientError; use tokio::sync::mpsc; use tui::backend::Backend; use tui::layout::{Alignment, Constraint, Direction, Layout}; use tui::style::{Color, Modifier, Style}; use tui::text::{Line, Span}; use tui::widgets::{ Axis, BarChart, Block, Borders, Chart, Dataset, Gauge, GraphType, Paragraph, Tabs, }; use tui::{symbols, Frame}; /// TUI powered App pub(crate) struct App { pub(crate) running: bool, pub(crate) data: Data, completed_runs: Vec<usize>, completed_batch: usize, current_batch: usize, current_tab: usize, touched_tab: bool, zoom: bool, is_error: bool, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, receiver: mpsc::Receiver<Result<Message, ClientError>>, } impl App { pub(crate) fn new( receiver: mpsc::Receiver<Result<Message, ClientError>>, tokenizer_name: String, sequence_length: u32, decode_length: u32, n_run: usize, batch_size: Vec<u32>, ) -> Self { let current_tab = 0; let completed_runs: Vec<usize> = (0..batch_size.len()).map(|_| 0).collect(); let completed_batch = 0; let current_batch = 0; let is_error = false; let data = Data::new(n_run, batch_size); Self { running: true, data, completed_runs, completed_batch, current_batch, current_tab, touched_tab: false, zoom: false, is_error, tokenizer_name, sequence_length, decode_length, n_run, receiver, } } /// Handle crossterm key events pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { match key_event { // Increase and wrap tab KeyEvent { code: KeyCode::Right, .. } | KeyEvent { code: KeyCode::Tab, .. } => { self.touched_tab = true; self.current_tab = (self.current_tab + 1) % self.data.batch_size.len(); } // Decrease and wrap tab KeyEvent { code: KeyCode::Left, .. } => { self.touched_tab = true; if self.current_tab > 0 { self.current_tab -= 1; } else { self.current_tab = self.data.batch_size.len() - 1; } } // Zoom on throughput/latency fig KeyEvent { code: KeyCode::Char('+'), .. } => { self.zoom = true; } // Unzoom on throughput/latency fig KeyEvent { code: KeyCode::Char('-'), .. } => { self.zoom = false; } // Quit KeyEvent { code: KeyCode::Char('q'), .. } | KeyEvent { code: KeyCode::Char('c'), modifiers: KeyModifiers::CONTROL, .. 
} => { self.running = false; } _ => (), } } /// Get all pending messages from generation task pub(crate) fn tick(&mut self) { while let Ok(message) = self.receiver.try_recv() { match message { Ok(message) => match message { Message::Prefill(step) => self.data.push_prefill(step, self.current_batch), Message::Decode(step) => self.data.push_decode(step, self.current_batch), Message::EndRun => { self.completed_runs[self.current_batch] += 1; } Message::EndBatch => { self.data.end_batch(self.current_batch); self.completed_batch += 1; if self.current_batch < self.data.batch_size.len() - 1 { // Only go to next tab if the user never touched the tab keys if !self.touched_tab { self.current_tab += 1; } self.current_batch += 1; } } Message::Warmup => {} }, Err(_) => self.is_error = true, } } } /// Render frame pub fn render<B: Backend>(&mut self, f: &mut Frame<'_, B>) { let batch_progress = (self.completed_batch as f64 / self.data.batch_size.len() as f64).clamp(0.0, 1.0); let run_progress = (self.completed_runs[self.current_batch] as f64 / self.n_run as f64).clamp(0.0, 1.0); // Vertical layout let row5 = Layout::default() .direction(Direction::Vertical) .constraints( [ Constraint::Length(1), Constraint::Length(3), Constraint::Length(3), Constraint::Length(13), Constraint::Min(10), ] .as_ref(), ) .split(f.size()); // Top row horizontal layout let top = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[2]); // Mid row horizontal layout let mid = Layout::default() .direction(Direction::Horizontal) .constraints( [ Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), Constraint::Percentage(25), ] .as_ref(), ) .split(row5[3]); // Left mid row vertical layout let prefill_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[0]); // Right mid row vertical layout let decode_text = Layout::default() .direction(Direction::Vertical) .constraints([Constraint::Length(8), Constraint::Length(5)].as_ref()) .split(mid[2]); let decode_text_latency = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(decode_text[0]); // Bottom row horizontal layout let bottom = Layout::default() .direction(Direction::Horizontal) .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref()) .split(row5[4]); // Title let title = Block::default() .borders(Borders::NONE) .title(format!( "Model: {} | Sequence Length: {} | Decode Length: {}", self.tokenizer_name, self.sequence_length, self.decode_length )) .style( Style::default() .add_modifier(Modifier::BOLD) .fg(Color::White), ); f.render_widget(title, row5[0]); // Helper let helper = Block::default() .borders(Borders::NONE) .title("<- | tab | ->: change batch tab | q / CTRL + c: quit | +/-: zoom") .title_alignment(Alignment::Right) .style(Style::default().fg(Color::White)); f.render_widget(helper, row5[0]); // Batch tabs let titles = self .data .batch_size .iter() .map(|b| { Line::from(vec![Span::styled( format!("Batch: {b}"), Style::default().fg(Color::White), )]) }) .collect(); let tabs = Tabs::new(titles) .block(Block::default().borders(Borders::ALL).title("Tabs")) .select(self.current_tab) .style(Style::default().fg(Color::LightCyan)) .highlight_style( Style::default() .add_modifier(Modifier::BOLD) .bg(Color::Black), ); f.render_widget(tabs, row5[1]); // Total progress bar let 
color = if self.is_error { Color::Red } else { Color::LightGreen }; let batch_gauge = progress_gauge( "Total Progress", format!("{} / {}", self.completed_batch, self.data.batch_size.len()), batch_progress, color, ); f.render_widget(batch_gauge, top[0]); // Batch progress Bar let color = if self.is_error { Color::Red } else { Color::LightBlue }; let run_gauge = progress_gauge( "Batch Progress", format!( "{} / {}", self.completed_runs[self.current_batch], self.n_run ), run_progress, color, ); f.render_widget(run_gauge, top[1]); // Prefill text infos let prefill_latency_block = latency_paragraph( &mut self.data.prefill_latencies[self.current_tab], "Prefill", ); let prefill_throughput_block = throughput_paragraph(&self.data.prefill_throughputs[self.current_tab], "Prefill"); f.render_widget(prefill_latency_block, prefill_text[0]); f.render_widget(prefill_throughput_block, prefill_text[1]); // Prefill latency histogram let histo_width = 7; let bins = if mid[1].width < 2 { 0 } else { (mid[1].width as usize - 2) / (histo_width + 1) } .max(2); let histo_data = latency_histogram_data(&self.data.prefill_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let prefill_histogram = latency_histogram(&histo_data_str, "Prefill").bar_width(histo_width as u16); f.render_widget(prefill_histogram, mid[1]); // Decode text info let decode_latency_block = latency_paragraph( &mut self.data.decode_latencies[self.current_tab], "Decode Total", ); let decode_token_latency_block = latency_paragraph( &mut self.data.decode_token_latencies[self.current_tab], "Decode Token", ); let decode_throughput_block = throughput_paragraph(&self.data.decode_throughputs[self.current_tab], "Decode"); f.render_widget(decode_latency_block, decode_text_latency[0]); f.render_widget(decode_token_latency_block, decode_text_latency[1]); f.render_widget(decode_throughput_block, decode_text[1]); // Decode latency histogram let histo_data = latency_histogram_data(&self.data.decode_latencies[self.current_tab], bins); let histo_data_str: Vec<(&str, u64)> = histo_data.iter().map(|(l, v)| (l.as_str(), *v)).collect(); let decode_histogram = latency_histogram(&histo_data_str, "Decode").bar_width(histo_width as u16); f.render_widget(decode_histogram, mid[3]); // Prefill latency/throughput chart let prefill_latency_throughput_chart = latency_throughput_chart( &self.data.prefill_batch_latency_throughput, &self.data.batch_size, self.zoom, "Prefill", ); f.render_widget(prefill_latency_throughput_chart, bottom[0]); // Decode latency/throughput chart let decode_latency_throughput_chart = latency_throughput_chart( &self.data.decode_batch_latency_throughput, &self.data.batch_size, self.zoom, "Decode", ); f.render_widget(decode_latency_throughput_chart, bottom[1]); } } /// App internal data struct pub(crate) struct Data { pub(crate) batch_size: Vec<u32>, pub(crate) prefill_latencies: Vec<Vec<f64>>, pub(crate) prefill_throughputs: Vec<Vec<f64>>, pub(crate) decode_latencies: Vec<Vec<f64>>, pub(crate) decode_token_latencies: Vec<Vec<f64>>, pub(crate) decode_throughputs: Vec<Vec<f64>>, pub(crate) prefill_batch_latency_throughput: Vec<(f64, f64)>, pub(crate) decode_batch_latency_throughput: Vec<(f64, f64)>, } impl Data { fn new(n_run: usize, batch_size: Vec<u32>) -> Self { let prefill_latencies: Vec<Vec<f64>> = (0..batch_size.len()) .map(|_| Vec::with_capacity(n_run)) .collect(); let prefill_throughputs: Vec<Vec<f64>> = prefill_latencies.clone(); let decode_latencies: Vec<Vec<f64>> = 
prefill_latencies.clone(); let decode_token_latencies: Vec<Vec<f64>> = decode_latencies.clone(); let decode_throughputs: Vec<Vec<f64>> = prefill_throughputs.clone(); let prefill_batch_latency_throughput: Vec<(f64, f64)> = Vec::with_capacity(batch_size.len()); let decode_batch_latency_throughput: Vec<(f64, f64)> = prefill_batch_latency_throughput.clone(); Self { batch_size, prefill_latencies, prefill_throughputs, decode_latencies, decode_token_latencies, decode_throughputs, prefill_batch_latency_throughput, decode_batch_latency_throughput, } } fn push_prefill(&mut self, prefill: Prefill, batch_idx: usize) { let latency = prefill.latency.as_micros() as f64 / 1000.0; self.prefill_latencies[batch_idx].push(latency); self.prefill_throughputs[batch_idx].push(prefill.throughput); } fn push_decode(&mut self, decode: Decode, batch_idx: usize) { let latency = decode.latency.as_micros() as f64 / 1000.0; let token_latency = decode.token_latency.as_micros() as f64 / 1000.0; self.decode_latencies[batch_idx].push(latency); self.decode_token_latencies[batch_idx].push(token_latency); self.decode_throughputs[batch_idx].push(decode.throughput); } fn end_batch(&mut self, batch_idx: usize) { self.prefill_batch_latency_throughput.push(( self.prefill_latencies[batch_idx].iter().sum::<f64>() / self.prefill_latencies[batch_idx].len() as f64, self.prefill_throughputs[batch_idx].iter().sum::<f64>() / self.prefill_throughputs[batch_idx].len() as f64, )); self.decode_batch_latency_throughput.push(( self.decode_latencies[batch_idx].iter().sum::<f64>() / self.decode_latencies[batch_idx].len() as f64, self.decode_throughputs[batch_idx].iter().sum::<f64>() / self.decode_throughputs[batch_idx].len() as f64, )); } } /// Progress bar fn progress_gauge(title: &str, label: String, progress: f64, color: Color) -> Gauge { Gauge::default() .block(Block::default().title(title).borders(Borders::ALL)) .gauge_style(Style::default().fg(color)) .label(Span::raw(label)) .ratio(progress) } /// Throughput paragraph fn throughput_paragraph<'a>(throughput: &Vec<f64>, name: &'static str) -> Paragraph<'a> { // Throughput average/high/low texts let throughput_texts = statis_spans(throughput, "tokens/secs"); // Throughput block Paragraph::new(throughput_texts).block( Block::default() .title(Span::raw(format!("{name} Throughput"))) .borders(Borders::ALL), ) } /// Latency paragraph fn latency_paragraph<'a>(latency: &mut Vec<f64>, name: &'static str) -> Paragraph<'a> { // Latency average/high/low texts let mut latency_texts = statis_spans(latency, "ms"); // Sort latency for percentiles float_ord::sort(latency); let latency_percentiles = crate::utils::percentiles(latency, &[50, 90, 99]); // Latency p50/p90/p99 texts let colors = vec![Color::LightGreen, Color::LightYellow, Color::LightRed]; for (i, (name, value)) in latency_percentiles.iter().enumerate() { let span = Line::from(vec![Span::styled( format!("{name}: {value:.2} ms"), Style::default().fg(colors[i]), )]); latency_texts.push(span); } Paragraph::new(latency_texts).block( Block::default() .title(Span::raw(format!("{name} Latency"))) .borders(Borders::ALL), ) } /// Average/High/Low spans fn statis_spans<'a>(data: &Vec<f64>, unit: &'static str) -> Vec<Line<'a>> { vec![ Line::from(vec![Span::styled( format!( "Average: {:.2} {unit}", data.iter().sum::<f64>() / data.len() as f64 ), Style::default().fg(Color::LightBlue), )]), Line::from(vec![Span::styled( format!( "Lowest: {:.2} {unit}", data.iter() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN) ), Style::default().fg(Color::Reset), 
)]), Line::from(vec![Span::styled( format!( "Highest: {:.2} {unit}", data.iter() .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN) ), Style::default().fg(Color::Reset), )]), ] } /// Latency histogram data fn latency_histogram_data(latency: &[f64], bins: usize) -> Vec<(String, u64)> { let histo_data: Vec<(String, u64)> = { let histo = crate::utils::histogram(latency, bins); histo .into_iter() .map(|(label, v)| (format!("{label:.2}"), v as u64)) .collect() }; histo_data } /// Latency Histogram fn latency_histogram<'a>( histo_data_str: &'a Vec<(&'a str, u64)>, name: &'static str, ) -> BarChart<'a> { BarChart::default() .block( Block::default() .title(format!("{name} latency histogram")) .style(Style::default().fg(Color::LightYellow).bg(Color::Reset)) .borders(Borders::ALL), ) .data(histo_data_str.as_slice()) } /// Latency/Throughput chart fn latency_throughput_chart<'a>( latency_throughput: &'a Vec<(f64, f64)>, batch_sizes: &'a [u32], zoom: bool, name: &'static str, ) -> Chart<'a> { let latency_iter = latency_throughput.iter().map(|(l, _)| l); let throughput_iter = latency_throughput.iter().map(|(_, t)| t); // Get extreme values let min_latency: f64 = *latency_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max_latency: f64 = *latency_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let min_throughput: f64 = *throughput_iter .clone() .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); let max_throughput: f64 = *throughput_iter .max_by(|a, b| a.total_cmp(b)) .unwrap_or(&std::f64::NAN); // Char min max values let min_x = if zoom { ((min_latency - 0.05 * min_latency) / 100.0).floor() * 100.0 } else { 0.0 }; let max_x = ((max_latency + 0.05 * max_latency) / 100.0).ceil() * 100.0; let step_x = (max_x - min_x) / 4.0; // Chart min max values let min_y = if zoom { ((min_throughput - 0.05 * min_throughput) / 100.0).floor() * 100.0 } else { 0.0 }; let max_y = ((max_throughput + 0.05 * max_throughput) / 100.0).ceil() * 100.0; let step_y = (max_y - min_y) / 4.0; // Labels let mut x_labels = vec![Span::styled( format!("{min_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { x_labels.push(Span::styled( format!("{:.2}", min_x + ((i + 1) as f64 * step_x)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } x_labels.push(Span::styled( format!("{max_x:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Labels let mut y_labels = vec![Span::styled( format!("{min_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )]; for i in 0..3 { y_labels.push(Span::styled( format!("{:.2}", min_y + ((i + 1) as f64 * step_y)), Style::default().fg(Color::Gray).bg(Color::Reset), )); } y_labels.push(Span::styled( format!("{max_y:.2}"), Style::default() .add_modifier(Modifier::BOLD) .fg(Color::Gray) .bg(Color::Reset), )); // Chart dataset let colors = color_vec(); let datasets: Vec<Dataset> = (0..latency_throughput.len()) .map(|i| { let color_idx = i % colors.len(); Dataset::default() .name(batch_sizes[i].to_string()) .marker(symbols::Marker::Block) .style(Style::default().fg(colors[color_idx])) .graph_type(GraphType::Scatter) .data(&latency_throughput[i..(i + 1)]) }) .collect(); // Chart Chart::new(datasets) .style(Style::default().fg(Color::Cyan).bg(Color::Reset)) .block( Block::default() .title(Span::styled( format!("{name} throughput over latency"), Style::default().fg(Color::Gray).bg(Color::Reset), )) 
.borders(Borders::ALL), ) .x_axis( Axis::default() .title("ms") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(x_labels) .bounds([min_x, max_x]), ) .y_axis( Axis::default() .title("tokens/secs") .style(Style::default().fg(Color::Gray).bg(Color::Reset)) .labels(y_labels) .bounds([min_y, max_y]), ) } // Colors for latency/throughput chart fn color_vec() -> Vec<Color> { vec![ Color::Red, Color::Green, Color::Yellow, Color::Blue, Color::Magenta, Color::Cyan, Color::Gray, Color::DarkGray, Color::LightRed, Color::LightGreen, Color::LightYellow, Color::LightBlue, Color::LightMagenta, Color::LightCyan, ] }
0