Dataset columns: text (string, lengths 5–631k) | id (string, lengths 14–178) | metadata (dict) | __index_level_0__ (int64, 0–647)
<jupyter_start><jupyter_code>import argparse import json import logging import math import os import random from pathlib import Path from tqdm import tqdm import datasets from datasets import load_dataset, DatasetDict import evaluate import torch from torch import nn from torch.utils.data import DataLoader import transformers from transformers import AutoTokenizer, AutoModel, default_data_collator, SchedulerType, get_scheduler from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from huggingface_hub import Repository, create_repo from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from peft import PeftModel import hnswlib class AutoModelForSentenceEmbedding(nn.Module): def __init__(self, model_name, tokenizer, normalize=True): super(AutoModelForSentenceEmbedding, self).__init__() self.model = AutoModel.from_pretrained(model_name) # , quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map={"":0}) self.normalize = normalize self.tokenizer = tokenizer def forward(self, **kwargs): model_output = self.model(**kwargs) embeddings = self.mean_pooling(model_output, kwargs["attention_mask"]) if self.normalize: embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings def mean_pooling(self, model_output, attention_mask): token_embeddings = model_output[0] # First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.model, name) def get_cosine_embeddings(query_embs, product_embs): return torch.sum(query_embs * product_embs, dim=1) model_name_or_path = "intfloat/e5-large-v2" peft_model_id = "smangrul/peft_lora_e5_semantic_search" dataset_name = "smangrul/amazon_esci" max_length = 70 batch_size = 256 import pandas as pd tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) dataset = load_dataset(dataset_name, revision="main") train_product_dataset = dataset["train"].to_pandas()[["product_title"]] val_product_dataset = dataset["validation"].to_pandas()[["product_title"]] product_dataset_for_indexing = pd.concat([train_product_dataset, val_product_dataset]) product_dataset_for_indexing = product_dataset_for_indexing.drop_duplicates() product_dataset_for_indexing.reset_index(drop=True, inplace=True) product_dataset_for_indexing.reset_index(inplace=True) product_dataset_for_indexing pd.set_option("display.max_colwidth", 300) product_dataset_for_indexing.sample(10) from datasets import Dataset dataset = Dataset.from_pandas(product_dataset_for_indexing) def preprocess_function(examples): products = examples["product_title"] result = tokenizer(products, padding="max_length", max_length=max_length, truncation=True) return result processed_dataset = dataset.map( preprocess_function, batched=True, remove_columns=dataset.column_names, desc="Running tokenizer on dataset", ) processed_dataset # base model model = AutoModelForSentenceEmbedding(model_name_or_path, tokenizer) # peft config and wrapping model = PeftModel.from_pretrained(model, peft_model_id) print(model) dataloader = DataLoader( processed_dataset, shuffle=False,
collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True, ) next(iter(dataloader)) ids_to_products_dict = {i: p for i, p in zip(dataset["index"], dataset["product_title"])} ids_to_products_dict device = "cuda" model.to(device) model.eval() model = model.merge_and_unload() import numpy as np num_products = len(dataset) d = 1024 product_embeddings_array = np.zeros((num_products, d)) for step, batch in enumerate(tqdm(dataloader)): with torch.no_grad(): with torch.amp.autocast(dtype=torch.bfloat16, device_type="cuda"): product_embs = model(**{k: v.to(device) for k, v in batch.items()}).detach().float().cpu() start_index = step * batch_size end_index = start_index + batch_size if (start_index + batch_size) < num_products else num_products product_embeddings_array[start_index:end_index] = product_embs del product_embs, batch def construct_search_index(dim, num_elements, data): # Declaring index search_index = hnswlib.Index(space="ip", dim=dim) # possible options are l2, cosine or ip # Initializing index - the maximum number of elements should be known beforehand search_index.init_index(max_elements=num_elements, ef_construction=200, M=100) # Element insertion (can be called several times): ids = np.arange(num_elements) search_index.add_items(data, ids) return search_index product_search_index = construct_search_index(d, num_products, product_embeddings_array) def get_query_embeddings(query, model, tokenizer, device): inputs = tokenizer(query, padding="max_length", max_length=70, truncation=True, return_tensors="pt") model.eval() with torch.no_grad(): query_embs = model(**{k: v.to(device) for k, v in inputs.items()}).detach().cpu() return query_embs[0] def get_nearest_neighbours(k, search_index, query_embeddings, ids_to_products_dict, threshold=0.7): # Controlling the recall by setting ef: search_index.set_ef(100) # ef should always be > k # Query dataset, k - number of the closest elements (returns 2 numpy arrays) labels, distances = search_index.knn_query(query_embeddings, k=k) return [ (ids_to_products_dict[label], (1 - distance)) for label, distance in zip(labels[0], distances[0]) if (1 - distance) >= threshold ] query = "NLP and ML books" k = 10 query_embeddings = get_query_embeddings(query, model, tokenizer, device) search_results = get_nearest_neighbours(k, product_search_index, query_embeddings, ids_to_products_dict, threshold=0.7) print(f"{query=}") for product, cosine_sim_score in search_results: print(f"cosine_sim_score={round(cosine_sim_score,2)} {product=}")<jupyter_output>query='NLP and ML books' cosine_sim_score=0.92 product='Machine Learning: A Journey from Beginner to Advanced Including Deep Learning, Scikit-learn and Tensorflow' cosine_sim_score=0.91 product='Mastering Machine Learning with scikit-learn' cosine_sim_score=0.91 product='Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems' cosine_sim_score=0.91 product='Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems' cosine_sim_score=0.91 product='Practical Deep Learning: A Python-Based Introduction' cosine_sim_score=0.9 product='Machine Learning: A Hands-On, Project-Based Introduction to Machine Learning for Absolute Beginners: Mastering Engineering ML Systems using Scikit-Learn and TensorFlow' cosine_sim_score=0.9 product='Mastering Machine Learning with scikit-learn - Second Edition: Apply effective learning algorithms to real-world problems using sci[...]
peft/examples/feature_extraction/peft_lora_embedding_semantic_similarity_inference.ipynb/0
{ "file_path": "peft/examples/feature_extraction/peft_lora_embedding_semantic_similarity_inference.ipynb", "repo_id": "peft", "token_count": 2679 }
238
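A minimal, self-contained sketch of the mean pooling used by `AutoModelForSentenceEmbedding` above, with dummy tensors so the masking arithmetic is easy to follow (the sizes are illustrative, not taken from the notebook):

```python
import torch

# Dummy "token embeddings": batch of 2 sequences, 4 tokens each, hidden size 8
token_embeddings = torch.randn(2, 4, 8)
# Attention mask: the second sequence ends with one padding token
attention_mask = torch.tensor([[1, 1, 1, 1], [1, 1, 1, 0]])

# Broadcast the mask over the hidden dimension, then average only over real tokens
mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
embeddings = (token_embeddings * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)
print(embeddings.shape)  # torch.Size([2, 8])
```

Because the notebook L2-normalizes these embeddings, the inner-product space (`space="ip"`) of the HNSW index is equivalent to cosine similarity, which is why `1 - distance` is reported as a cosine score.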
<jupyter_start><jupyter_text>Fine-tune FLAN-T5 using `bitsandbytes`, `peft` & `transformers` 🤗 In this notebook we will see how to properly use `peft`, `transformers` & `bitsandbytes` to fine-tune `flan-t5-large` in a Google Colab! We will finetune the model on the [`financial_phrasebank`](https://huggingface.co/datasets/financial_phrasebank) dataset, which consists of financial sentences labeled as `positive`, `neutral` or `negative`. Note that you could use the same notebook to fine-tune `flan-t5-xl` as well, but you would need to shard the model first to avoid CPU RAM issues on Google Colab; check [these weights](https://huggingface.co/ybelkada/flan-t5-xl-sharded-bf16). Install requirements<jupyter_code>!pip install -q datasets==3.6.0 accelerate !pip install -q git+https://github.com/bitsandbytes-foundation/bitsandbytes.git !pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git@main<jupyter_output>━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 76.3/76.3 MB 10.6 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 462.8/462.8 KB 45.6 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 199.7/199.7 KB 26.9 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 132.0/132.0 KB 20.1 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 190.3/190.3 KB 26.8 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 213.0/213.0 KB 26.5 MB/s eta 0:00:00 ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 140.6/140.6 KB 20.2 MB/s eta 0:00:00 Installing build dependencies ... done Getting requirements to build wheel ... done Preparing metadata (pyproject.tom[...]<jupyter_text>Import model and tokenizer<jupyter_code># Select CUDA device index import os import torch os.environ["CUDA_VISIBLE_DEVICES"] = "0" from datasets import load_dataset from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, BitsAndBytesConfig model_name = "google/flan-t5-large" model = AutoModelForSeq2SeqLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True)) tokenizer = AutoTokenizer.from_pretrained(model_name)<jupyter_output><empty_output><jupyter_text>Prepare model for training Some pre-processing needs to be done before training such an int8 model using `peft`, therefore let's import a utility function `prepare_model_for_kbit_training` that will: - Cast all the non-`int8` modules to full precision (`fp32`) for stability - Add a `forward_hook` to the input embedding layer to enable gradient computation of the input hidden states - Enable gradient checkpointing for more memory-efficient training<jupyter_code>from peft import prepare_model_for_kbit_training model = prepare_model_for_kbit_training(model)<jupyter_output><empty_output><jupyter_text>Load your `PeftModel` Here we will use LoRA (Low-Rank Adaptation) to train our model<jupyter_code>from peft import LoraConfig, get_peft_model, TaskType def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model.
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) model = get_peft_model(model, lora_config) print_trainable_parameters(model)<jupyter_output>trainable params: 4718592 || all params: 787868672 || trainable%: 0.5989059049678777<jupyter_text>As you can see, here we are only training 0.6% of the parameters of the model! This is a huge memory gain that will enable us to fine-tune the model without any memory issue. Load and process dataHere we will use [`financial_phrasebank`](https://huggingface.co/datasets/financial_phrasebank) dataset to fine-tune our model on sentiment classification on financial sentences. We will load the split `sentences_allagree`, which corresponds according to the model card to the split where there is a 100% annotator agreement.<jupyter_code># loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, )<jupyter_output><empty_output><jupyter_text>Let's also apply some pre-processing of the input data, the labels needs to be pre-processed, the tokens corresponding to `pad_token_id` needs to be set to `-100` so that the `CrossEntropy` loss associated with the model will correctly ignore these tokens.<jupyter_code># data preprocessing text_column = "sentence" label_column = "text_label" max_length = 128 def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"]<jupyter_output><empty_output><jupyter_text>Train our model! Let's now train our model, run the cells below.Note that for T5 since some layers are kept in `float32` for stability purposes there is no need to call autocast on the trainer.<jupyter_code>from transformers import TrainingArguments, Trainer training_args = TrainingArguments( "temp", eval_strategy="epoch", learning_rate=1e-3, gradient_accumulation_steps=1, auto_find_batch_size=True, num_train_epochs=1, save_steps=100, save_total_limit=8, ) trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! 
trainer.train()<jupyter_output>/usr/local/lib/python3.8/dist-packages/transformers/optimization.py:346: FutureWarning: This implementation of AdamW is deprecated and will be removed in a future version. Use the PyTorch implementation torch.optim.AdamW instead, or set `no_deprecation_warning=True` to disable this warning warnings.warn( ***** Running training ***** Num examples = 2037 Num Epochs = 1 Instantaneous batch size per device = 8 Total train batch size (w. parallel, distributed & accumulation) = 8 Gradient Accumulation steps = 1 Total optimization steps = 255 Number of trainable parameters = 4718592 /usr/local/lib/python3.8/dist-packages/bitsandbytes/autograd/_functions.py:298: UserWarning: MatMul8bitLt: inputs will be cast from torch.float32 to float16 during quantization warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization")<jupyter_text>Qualitatively test our model Let's have a quick qualitative evaluation of the model by taking a sample from the dataset that corresponds to a positive label. Run your generation just as you would run your model from `transformers`:<jupyter_code>model.eval() input_text = "In January-September 2009 , the Group 's net interest income increased to EUR 112.4 mn from EUR 74.3 mn in January-September 2008 ." inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print("input sentence: ", input_text) print(" output prediction: ", tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>Generate config GenerationConfig { "_from_model_config": true, "decoder_start_token_id": 0, "eos_token_id": 1, "pad_token_id": 0, "transformers_version": "4.27.0.dev0", "use_cache": false } /usr/local/lib/python3.8/dist-packages/bitsandbytes/autograd/_functions.py:298: UserWarning: MatMul8bitLt: inputs will be cast from torch.float32 to float16 during quantization warnings.warn(f"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization") /usr/local/lib/python3.8/dist-packages/transformers/generation/utils.py:1374: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`. warnings.warn(<jupyter_text>Share your adapters on 🤗 Hub Once you have trained your adapter, you can easily share it on the Hub using the `push_to_hub` method. Note that only the adapter weights and config will be pushed.<jupyter_code>from huggingface_hub import notebook_login notebook_login() model.push_to_hub("ybelkada/flan-t5-large-financial-phrasebank-lora", use_auth_token=True)<jupyter_output>Uploading the following files to ybelkada/flan-t5-large-lora: adapter_model.bin,adapter_config.json<jupyter_text>Load your adapter from the Hub You can load the model together with the adapter with a few lines of code!
Check the snippet below to load the adapter from the Hub and run the example evaluation!<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoModelForSeq2SeqLM, AutoTokenizer peft_model_id = "ybelkada/flan-t5-large-financial-phrasebank-lora" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path, torch_dtype="auto", device_map="auto") tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Lora model model = PeftModel.from_pretrained(model, peft_model_id) model.eval() input_text = "In January-September 2009 , the Group 's net interest income increased to EUR 112.4 mn from EUR 74.3 mn in January-September 2008 ." inputs = tokenizer(input_text, return_tensors="pt").to(model.device) outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print("input sentence: ", input_text) print(" output prediction: ", tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>Generate config GenerationConfig { "_from_model_config": true, "decoder_start_token_id": 0, "eos_token_id": 1, "pad_token_id": 0, "transformers_version": "4.27.0.dev0" } /usr/local/lib/python3.8/dist-packages/transformers/generation/utils.py:1374: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`. warnings.warn(
peft/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb/0
{ "file_path": "peft/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb", "repo_id": "peft", "token_count": 4331 }
239
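As a sanity check on the `trainable params: 4718592` printed above, the count can be reproduced by hand. A sketch assuming the T5-large architecture (d_model = 1024; 24 encoder self-attention blocks plus 24 decoder self-attention and 24 cross-attention blocks):

```python
d_model = 1024
r = 16

# LoRA adds A (r x d_in) and B (d_out x r) to every targeted nn.Linear
params_per_linear = r * d_model + d_model * r  # 32768

# 72 attention modules in total, each with a "q" and a "v" projection targeted
attention_modules = 24 + 24 + 24
targeted_linears = attention_modules * 2

print(targeted_linears * params_per_linear)  # 4718592, matching the notebook output
```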
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, HfArgumentParser from trl import SFTConfig, SFTTrainer from peft import LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training @dataclass class ScriptArguments(SFTConfig): # model configs base_model_name_or_path: Optional[str] = field( default=None, metadata={"help": "The name or path of the fp32/16 base model."} ) residual_model_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The name or path of the fp32/16 residual model. (`['fxmeng/pissa-llama-2-7b-r16-alpha-16']`)" }, ) bits: str = field(default="fp32", metadata={"help": "(`['fp4', 'nf4', 'int8', 'bf16', 'fp16', fp32]`)"}) init_lora_weights: str = field(default="pissa", metadata={"help": "(`['gaussian', 'pissa', 'pissa_niter_4']`)"}) lora_r: int = field(default=16) lora_alpha: int = field(default=16) lora_dropout: float = field(default=0) convert_pissa_to_lora: bool = field(default=False) merge_and_save: bool = field(default=False) # dataset configs data_path: str = field(default="imdb", metadata={"help": "Path to the training data."}) dataset_split: str = field(default="train[:1%]", metadata={"help": "(`['train', 'test', 'eval']`):"}) dataset_field: list[str] = field(default=None, metadata={"help": "Fields of dataset input and output."}) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] print(script_args) print(f"Load pre-processed residual model in {script_args.bits} bits.") if script_args.bits in ["nf4", "fp4", "int8"]: quantization_config = BitsAndBytesConfig( load_in_4bit=(script_args.bits == "nf4" or script_args.bits == "fp4"), load_in_8bit=script_args.bits == "int8", bnb_4bit_quant_type=script_args.bits, bnb_4bit_use_double_quant=True, bnb_4bit_compute_dtype=torch.bfloat16, ) res_model = AutoModelForCausalLM.from_pretrained( script_args.residual_model_name_or_path, quantization_config=quantization_config, low_cpu_mem_usage=True ) res_model = prepare_model_for_kbit_training(res_model) print("Wrapping the residual model with PiSSA.") peft_model = PeftModel.from_pretrained( res_model, script_args.residual_model_name_or_path, subfolder="pissa_init", is_trainable=True ) tokenizer = AutoTokenizer.from_pretrained(script_args.residual_model_name_or_path) elif script_args.residual_model_name_or_path is not None: res_model = AutoModelForCausalLM.from_pretrained( script_args.residual_model_name_or_path, torch_dtype=( torch.float16 if script_args.bits == "fp16" else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32) ), device_map="auto", ) print("Wrapping the residual model with PiSSA.") peft_model = PeftModel.from_pretrained( res_model, script_args.residual_model_name_or_path, subfolder="pissa_init", is_trainable=True ) tokenizer = 
AutoTokenizer.from_pretrained(script_args.residual_model_name_or_path) elif script_args.base_model_name_or_path is not None: print( f"No available pre-processed model, manually initialize a PiSSA using {script_args.base_model_name_or_path}." ) model = AutoModelForCausalLM.from_pretrained( script_args.base_model_name_or_path, torch_dtype=( torch.float16 if script_args.bits == "fp16" else (torch.bfloat16 if script_args.bits == "bf16" else torch.float32) ), device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(script_args.base_model_name_or_path) tokenizer.pad_token_id = tokenizer.eos_token_id lora_config = LoraConfig( r=script_args.lora_r, lora_alpha=script_args.lora_alpha, init_lora_weights=script_args.init_lora_weights, lora_dropout=script_args.lora_dropout, target_modules=["q_proj", "o_proj", "k_proj", "v_proj", "gate_proj", "up_proj", "down_proj"], bias="none", task_type="CAUSAL_LM", ) peft_model = get_peft_model(model, lora_config) print(peft_model) peft_model.print_trainable_parameters() print(f"Training PiSSA with trl on the {script_args.data_path}[{script_args.dataset_split}] dataset.") dataset = load_dataset(script_args.data_path, split=script_args.dataset_split) dataset = dataset.map( lambda example: { "text": f"### USER: {example[script_args.dataset_field[0]]}\n### ASSISTANT: {example[script_args.dataset_field[1]]}" } ) trainer = SFTTrainer( model=peft_model, args=script_args, train_dataset=dataset, processing_class=tokenizer, ) trainer.train() trainer.save_state() ############################## Upon training completion, convert and save PiSSA in LoRA format ############################## if script_args.convert_pissa_to_lora: peft_model.save_pretrained( os.path.join(script_args.output_dir, "pissa_lora"), path_initial_model_for_weight_conversion=os.path.join(script_args.residual_model_name_or_path, "pissa_init"), ) else: peft_model.save_pretrained( os.path.join(script_args.output_dir, "pissa_ft"), ) if script_args.merge_and_save: model = peft_model.merge_and_unload() model.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged")) tokenizer.save_pretrained(os.path.join(script_args.output_dir, "pissa_merged"))
peft/examples/pissa_finetuning/pissa_finetuning.py/0
{ "file_path": "peft/examples/pissa_finetuning/pissa_finetuning.py", "repo_id": "peft", "token_count": 2527 }
240
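Once the script above has finished, the saved adapter can be reloaded for inference. A hypothetical sketch (paths and model names are placeholders, not values from the script); note that `pissa_ft` is trained against the *residual* model, while the `pissa_lora` conversion yields an adapter loadable on the original base weights:

```python
from transformers import AutoModelForCausalLM
from peft import PeftModel

# If the run used --convert_pissa_to_lora, load the converted adapter on the base model:
base = AutoModelForCausalLM.from_pretrained("path/to/base-model")
model = PeftModel.from_pretrained(base, "output_dir/pissa_lora")

# Otherwise, load "pissa_ft" on top of the residual model used during training:
# res = AutoModelForCausalLM.from_pretrained("path/to/residual-model")
# model = PeftModel.from_pretrained(res, "output_dir/pissa_ft")
```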
<jupyter_start><jupyter_text>Named Entity Recognition with a PEFT Model 🤗 In this notebook, we will learn how to perform Named Entity Recognition (NER) on the CoNLL-2003 dataset using the Trainer class. This notebook has been adapted from the main NLP course here: https://huggingface.co/learn/nlp-course/chapter7/2?fw=pt#fine-tuning-the-model<jupyter_code># Install the required libraries !pip install -q datasets evaluate transformers seqeval # Import required libraries from datasets import load_dataset from transformers import AutoTokenizer, AutoModelForTokenClassification, DataCollatorForTokenClassification, TrainingArguments, Trainer, pipeline from peft import get_peft_model, LoraConfig, TaskType import evaluate import numpy as np from huggingface_hub import notebook_login raw_datasets = load_dataset("conll2003") print(raw_datasets) # Look at the tokens of the first training example raw_datasets["train"][0]["tokens"] # Look at the NER tags of the first training example raw_datasets["train"][0]["ner_tags"] # Get the label names for the NER tags ner_feature = raw_datasets["train"].features["ner_tags"] label_names = ner_feature.feature.names label_names words = raw_datasets["train"][0]["tokens"] labels = raw_datasets["train"][0]["ner_tags"] line1 = "" line2 = "" for word, label in zip(words, labels): full_label = label_names[label] max_length = max(len(word), len(full_label)) line1 += word + " " * (max_length - len(word) + 1) line2 += full_label + " " * (max_length - len(full_label) + 1) print(line1) print(line2) # Load the tokenizer model_checkpoint = "bert-base-cased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) # Tokenize the first training example inputs = tokenizer(raw_datasets["train"][0]["tokens"], is_split_into_words=True) inputs.tokens() def align_labels_with_tokens(labels, word_ids): new_labels = [] current_word = None for word_id in word_ids: if word_id != current_word: # Start of a new word!
current_word = word_id label = -100 if word_id is None else labels[word_id] new_labels.append(label) elif word_id is None: # Special token new_labels.append(-100) else: # Same word as previous token label = labels[word_id] # If the label is B-XXX we change it to I-XXX if label % 2 == 1: label += 1 new_labels.append(label) return new_labels labels = raw_datasets["train"][0]["ner_tags"] word_ids = inputs.word_ids() print(labels) print(align_labels_with_tokens(labels, word_ids)) def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples["tokens"], truncation=True, is_split_into_words=True ) all_labels = examples["ner_tags"] new_labels = [] for i, labels in enumerate(all_labels): word_ids = tokenized_inputs.word_ids(i) new_labels.append(align_labels_with_tokens(labels, word_ids)) tokenized_inputs["labels"] = new_labels return tokenized_inputs tokenized_datasets = raw_datasets.map( tokenize_and_align_labels, batched=True, remove_columns=raw_datasets["train"].column_names, ) data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer) for i in range(2): print(tokenized_datasets["train"][i]["labels"]) metric = evaluate.load("seqeval") # Create label mappings id2label = {i: label for i, label in enumerate(label_names)} label2id = {v: k for k, v in id2label.items()} # Load the pre-trained model model = AutoModelForTokenClassification.from_pretrained( model_checkpoint, id2label=id2label, label2id=label2id, ) model.config.num_labels model # Configure LoRA (Low-Rank Adaptation) for fine-tuning peft_config = LoraConfig(target_modules = ["query", "key"], task_type = TaskType.TOKEN_CLS) model = get_peft_model(model, peft_config) model.print_trainable_parameters() def compute_metrics(eval_preds): logits, labels = eval_preds predictions = np.argmax(logits, axis=-1) # Remove ignored index (special tokens) and convert to labels true_labels = [[label_names[l] for l in label if l != -100] for label in labels] true_predictions = [ [label_names[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] all_metrics = metric.compute(predictions=true_predictions, references=true_labels) return { "precision": all_metrics["overall_precision"], "recall": all_metrics["overall_recall"], "f1": all_metrics["overall_f1"], "accuracy": all_metrics["overall_accuracy"], } notebook_login() args = TrainingArguments( "bert-finetuned-ner-lora", eval_strategy="epoch", per_device_train_batch_size=32, # decrease this for OOM error per_device_eval_batch_size=64, save_strategy="epoch", learning_rate=2e-3, num_train_epochs=5, weight_decay=0.01, load_best_model_at_end=True, do_eval=True, do_predict=True, metric_for_best_model="accuracy", label_names=["labels"], push_to_hub=True, ) trainer = Trainer( model=model, args=args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, processing_class=tokenizer, compute_metrics=compute_metrics ) trainer.train() from peft import PeftModel # Replace this with your own checkpoint lora_checkpoint = "./bert-finetuned-ner-lora" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) base_model = AutoModelForTokenClassification.from_pretrained( model_checkpoint, id2label=id2label, label2id=label2id, ) lora_model = PeftModel.from_pretrained(base_model, lora_checkpoint) token_classifier = pipeline( "token-classification", model=lora_model, tokenizer=tokenizer, aggregation_strategy="simple" ) token_classifier("My name is Jino.")<jupyter_output>Some weights of 
BertForTokenClassification were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['classifier.bias', 'classifier.weight'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. Device set to use xpu:0
peft/examples/token_classification/peft_lora_ner.ipynb/0
{ "file_path": "peft/examples/token_classification/peft_lora_ner.ipynb", "repo_id": "peft", "token_count": 2386 }
241
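The `label % 2 == 1` branch in `align_labels_with_tokens` leans on CoNLL-2003's label ordering, where every B- tag has an odd id immediately followed by its I- counterpart. A quick sketch that makes the mapping explicit:

```python
# The ner_tags feature order of the conll2003 dataset
label_names = ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"]

for label_id, name in enumerate(label_names):
    if label_id % 2 == 1:  # odd ids are B- tags; id + 1 is the matching I- tag
        print(f"{name} ({label_id}) -> {label_names[label_id + 1]} ({label_id + 1})")
```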
{ "auto_mapping": null, "base_model_name_or_path": null, "bias": "none", "exclude_modules": null, "fan_in_fan_out": false, "inference_mode": false, "init_weights": false, "layers_pattern": null, "layers_to_transform": null, "modules_to_save": null, "block_size": 64, "block_size_pattern": {}, "peft_type": "C3A", "revision": null, "target_modules": [ "v_proj", "q_proj" ], "task_type": null }
peft/method_comparison/MetaMathQA/experiments/c3a/llama-3.2-3B-default/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/c3a/llama-3.2-3B-default/adapter_config.json", "repo_id": "peft", "token_count": 193 }
242
{ "auto_mapping": null, "base_model_name_or_path": null, "bias": "none", "d_initial": 0.1, "fan_in_fan_out": false, "inference_mode": false, "init_weights": true, "layers_pattern": null, "layers_to_transform": null, "modules_to_save": null, "peft_type": "VERA", "projection_prng_key": 0, "r": 256, "revision": null, "save_projection": true, "target_modules": null, "task_type": null, "vera_dropout": 0.0 }
peft/method_comparison/MetaMathQA/experiments/vera/llama-3.2-3B-default/adapter_config.json/0
{ "file_path": "peft/method_comparison/MetaMathQA/experiments/vera/llama-3.2-3B-default/adapter_config.json", "repo_id": "peft", "token_count": 193 }
243
{ "base_model_name_or_path": null, "bias": "none", "fan_in_fan_out": false, "inference_mode": false, "init_lora_weights": true, "lora_alpha": 16, "lora_dropout": 0.1, "modules_to_save": null, "peft_type": "LORA", "r": 8, "target_modules": [ "q_proj", "v_proj" ], "task_type": "CAUSAL_LM" }
peft/method_comparison/text_generation_benchmark/experiments/lora/lora_r8/adapter_config.json/0
{ "file_path": "peft/method_comparison/text_generation_benchmark/experiments/lora/lora_r8/adapter_config.json", "repo_id": "peft", "token_count": 196 }
244
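Each `adapter_config.json` above is just the serialized form of a PEFT config class. A sketch producing an equivalent file to the LoRA config shown (the output directory is illustrative):

```python
from peft import LoraConfig

config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
config.save_pretrained("lora_r8")  # writes an adapter_config.json like the one above
```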
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import importlib import os from typing import Optional from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoTokenizer, ) from .config import PeftConfig from .peft_model import ( PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) from .utils.constants import TOKENIZER_CONFIG_NAME from .utils.other import check_file_exists_on_hf_hub MODEL_TYPE_TO_PEFT_MODEL_MAPPING: dict[str, type[PeftModel]] = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, "QUESTION_ANS": PeftModelForQuestionAnswering, "FEATURE_EXTRACTION": PeftModelForFeatureExtraction, } class _BaseAutoPeftModel: _target_class = None _target_peft_class = None def __init__(self, *args, **kwargs): # For consistency with transformers: https://github.com/huggingface/transformers/blob/91d7df58b6537d385e90578dac40204cb550f706/src/transformers/models/auto/auto_factory.py#L400 raise EnvironmentError( # noqa: UP024 f"{self.__class__.__name__} is designed to be instantiated " f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or " f"`{self.__class__.__name__}.from_config(config)` methods." ) @classmethod def from_pretrained( cls, pretrained_model_name_or_path, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, revision: Optional[str] = None, **kwargs, ): r""" A wrapper around all the preprocessing steps a user needs to perform in order to load a PEFT model. The kwargs are passed along to `PeftConfig` that automatically takes care of filtering the kwargs of the Hub methods and the config object init. """ peft_config = PeftConfig.from_pretrained(pretrained_model_name_or_path, revision=revision, **kwargs) base_model_path = peft_config.base_model_name_or_path base_model_revision = peft_config.revision task_type = getattr(peft_config, "task_type", None) if cls._target_class is not None: target_class = cls._target_class elif cls._target_class is None and task_type is not None: # this is only in the case where we use `AutoPeftModel` raise ValueError( "Cannot use `AutoPeftModel` with a task type, please use a specific class for your task type. (e.g. 
`AutoPeftModelForCausalLM` for `task_type='CAUSAL_LM'`)" ) if task_type is not None: expected_target_class = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[task_type] if cls._target_peft_class.__name__ != expected_target_class.__name__: raise ValueError( f"Expected target PEFT class: {expected_target_class.__name__}, but you have asked for: {cls._target_peft_class.__name__}" " make sure that you are loading the correct model for your task type." ) elif task_type is None and getattr(peft_config, "auto_mapping", None) is not None: auto_mapping = getattr(peft_config, "auto_mapping", None) base_model_class = auto_mapping["base_model_class"] parent_library_name = auto_mapping["parent_library"] parent_library = importlib.import_module(parent_library_name) target_class = getattr(parent_library, base_model_class) else: raise ValueError( "Cannot infer the auto class from the config, please make sure that you are loading the correct model for your task type." ) base_model = target_class.from_pretrained(base_model_path, revision=base_model_revision, **kwargs) tokenizer_exists = False if os.path.exists(os.path.join(pretrained_model_name_or_path, TOKENIZER_CONFIG_NAME)): tokenizer_exists = True else: token = kwargs.get("token", None) if token is None: token = kwargs.get("use_auth_token", None) tokenizer_exists = check_file_exists_on_hf_hub( repo_id=pretrained_model_name_or_path, filename=TOKENIZER_CONFIG_NAME, revision=revision, repo_type=kwargs.get("repo_type", None), token=token, ) if tokenizer_exists and hasattr(base_model, "get_input_embeddings"): tokenizer = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, trust_remote_code=kwargs.get("trust_remote_code", False) ) embedding_size = base_model.get_input_embeddings().weight.shape[0] if len(tokenizer) > embedding_size: # only resize if the tokenizer has a larger vocab size than there are embeddings base_model.resize_token_embeddings(len(tokenizer)) return cls._target_peft_class.from_pretrained( base_model, pretrained_model_name_or_path, adapter_name=adapter_name, is_trainable=is_trainable, config=config, **kwargs, ) class AutoPeftModel(_BaseAutoPeftModel): _target_class = None _target_peft_class = PeftModel class AutoPeftModelForCausalLM(_BaseAutoPeftModel): _target_class = AutoModelForCausalLM _target_peft_class = PeftModelForCausalLM class AutoPeftModelForSeq2SeqLM(_BaseAutoPeftModel): _target_class = AutoModelForSeq2SeqLM _target_peft_class = PeftModelForSeq2SeqLM class AutoPeftModelForSequenceClassification(_BaseAutoPeftModel): _target_class = AutoModelForSequenceClassification _target_peft_class = PeftModelForSequenceClassification class AutoPeftModelForTokenClassification(_BaseAutoPeftModel): _target_class = AutoModelForTokenClassification _target_peft_class = PeftModelForTokenClassification class AutoPeftModelForQuestionAnswering(_BaseAutoPeftModel): _target_class = AutoModelForQuestionAnswering _target_peft_class = PeftModelForQuestionAnswering class AutoPeftModelForFeatureExtraction(_BaseAutoPeftModel): _target_class = AutoModel _target_peft_class = PeftModelForFeatureExtraction
peft/src/peft/auto.py/0
{ "file_path": "peft/src/peft/auto.py", "repo_id": "peft", "token_count": 2989 }
245
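Typical usage of the auto classes defined in `auto.py`: a single `from_pretrained` call resolves the base model from the adapter's `adapter_config.json`, loads it, and attaches the adapter. A short sketch (the checkpoint name is illustrative):

```python
from peft import AutoPeftModelForCausalLM

# Loads the base model recorded in adapter_config.json, then wraps it with the adapter
model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
model.eval()
```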
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Optional from peft.tuners.lora import LoraConfig from peft.utils import PeftType @dataclass class AdaLoraConfig(LoraConfig): """ This is the configuration class to store the configuration of a [`~peft.AdaLora`]. AdaLoRA has three phases defined by `tinit`, `tfinal` and `total_step`. The initial phase can be understood as a step for pre-training the adapters so that when reducing their rank, there is already some information encoded that can be reduced instead of random matrices. This phase is defined by supplying `tinit`. After the initial phase is over (`tinit` steps have passed) and the final phase has not begun, AdaLoRA reduces the budget of how much rank each layer is allowed to have with each step. This is where the reduction of rank is happening. This goes on until `total_step - tfinal` steps are reached. The last phase, beginning once `total_step - tfinal` steps are reached, does not change the layer ranks anymore but fine-tunes the reduced-rank layers that resulted from the previous phase. A practical example: `tinit` is 10, `tfinal` is 20, `total_step` is 100. We spend 10 steps doing pre-training without rank reduction because our budget is constant (init phase), then we spend 70 (100 - 10 - 20) steps in the reduction phase where our budget decreases step-wise and, finally, 20 steps in the final fine-tuning stage without reduction. Args: target_r (`int`): The target average rank of incremental matrix. init_r (`int`): The initial rank for each incremental matrix. tinit (`int`): The steps of initial fine-tuning warmup. tfinal (`int`): The number of steps of final fine-tuning. deltaT (`int`): The time interval between two budget allocations. beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing. beta2 (`float`): The hyperparameter of EMA for uncertainty quantification. orth_reg_weight (`float`): The coefficient of orthogonal regularization. total_step (`int`): The total training steps that should be specified before training. rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.
""" target_r: int = field(default=8, metadata={"help": "Target Lora matrix dimension."}) init_r: int = field(default=12, metadata={"help": "Initial Lora matrix dimension."}) tinit: int = field(default=0, metadata={"help": "The steps of initial warmup."}) tfinal: int = field(default=0, metadata={"help": "The steps of final warmup."}) deltaT: int = field(default=1, metadata={"help": "Step interval of rank allocation."}) beta1: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) beta2: float = field(default=0.85, metadata={"help": "Hyperparameter of EMA."}) orth_reg_weight: float = field(default=0.5, metadata={"help": "The orthogonal regularization coefficient."}) total_step: Optional[int] = field(default=None, metadata={"help": "The total training steps."}) rank_pattern: Optional[dict] = field(default=None, metadata={"help": "The saved rank pattern."}) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.ADALORA if self.use_dora: raise ValueError(f"{self.peft_type} does not support DoRA.") if self.loftq_config: raise ValueError(f"{self.peft_type} does not support LOFTQ.") self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) self.exclude_modules = ( set(self.exclude_modules) if isinstance(self.exclude_modules, list) else self.exclude_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # check for layers_to_transform and layers_pattern if self.layers_pattern and not self.layers_to_transform: raise ValueError("When `layers_pattern` is specified, `layers_to_transform` must also be specified. ") # Check if 'r' has been set to a non-default value if self.r != 8: # 8 is the default value for 'r' in LoraConfig warnings.warn( "Note that `r` is not used in AdaLora and will be ignored." "If you intended to set the initial rank, use `init_r` instead." ) if self.total_step is None or self.total_step <= 0: raise ValueError("AdaLoRA does not work when `total_step` is None, supply a value > 0.") if self.tinit >= (self.total_step - self.tfinal): raise ValueError( "The supplied schedule values don't allow for a budgeting phase. Decrease `tfinal`/`tinit` or " "increase `total_step`." )
peft/src/peft/tuners/adalora/config.py/0
{ "file_path": "peft/src/peft/tuners/adalora/config.py", "repo_id": "peft", "token_count": 1944 }
246
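The docstring's practical schedule (tinit=10, tfinal=20, total_step=100) translates directly into a config. A minimal sketch (the values are the docstring's example, not recommendations):

```python
from peft import AdaLoraConfig

config = AdaLoraConfig(
    init_r=12,       # rank of each matrix during the init phase
    target_r=8,      # average rank budget after reduction
    tinit=10,        # steps 0-9: constant budget (pre-training of adapters)
    tfinal=20,       # steps 80-99: ranks frozen, fine-tuning only
    total_step=100,  # steps 10-79: step-wise budget reduction
)
```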
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Union import torch from torch import nn from peft.tuners.lycoris_utils import LycorisConfig, LycorisTuner from peft.utils import TRANSFORMERS_MODELS_TO_LOHA_TARGET_MODULES_MAPPING from peft.utils.other import get_pattern_key from .layer import Conv2d, Linear, LoHaLayer class LoHaModel(LycorisTuner): """ Creates Low-Rank Hadamard Product model from a pretrained model. The method is partially described in https://huggingface.co/papers/2108.06098 Current implementation heavily borrows from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/loha.py Args: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. config ([`LoHaConfig`]): The configuration of the LoHa model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The LoHa model. Example: ```py >>> from diffusers import StableDiffusionPipeline >>> from peft import LoHaModel, LoHaConfig >>> config_te = LoHaConfig( ... r=8, ... alpha=32, ... target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... ) >>> config_unet = LoHaConfig( ... r=8, ... alpha=32, ... target_modules=[ ... "proj_in", ... "proj_out", ... "to_k", ... "to_q", ... "to_v", ... "to_out.0", ... "ff.net.0.proj", ... "ff.net.2", ... ], ... rank_dropout=0.0, ... module_dropout=0.0, ... init_weights=True, ... use_effective_conv2d=True, ... ) >>> model = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") >>> model.text_encoder = LoHaModel(model.text_encoder, config_te, "default") >>> model.unet = LoHaModel(model.unet, config_unet, "default") ``` **Attributes**: - **model** ([`~torch.nn.Module`]) -- The model to be adapted. - **peft_config** ([`LoHaConfig`]): The configuration of the LoHa model. """ prefix: str = "hada_" layers_mapping: dict[type[torch.nn.Module], type[LoHaLayer]] = { torch.nn.Conv2d: Conv2d, torch.nn.Linear: Linear, } def _create_and_replace( self, config: LycorisConfig, adapter_name: str, target: Union[LoHaLayer, nn.Module], target_name: str, parent: nn.Module, current_key: str, ) -> None: """ A private method to create and replace the target module with the adapter module.
""" r_key = get_pattern_key(config.rank_pattern.keys(), current_key) alpha_key = get_pattern_key(config.alpha_pattern.keys(), current_key) kwargs = config.to_dict() kwargs["r"] = config.rank_pattern.get(r_key, config.r) kwargs["alpha"] = config.alpha_pattern.get(alpha_key, config.alpha) if isinstance(target, LoHaLayer): target.update_layer(adapter_name, **kwargs) else: new_module = self._create_new_module(config, adapter_name, target, **kwargs) self._replace_module(parent, target_name, new_module, target) @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_LOHA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_LOHA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config
peft/src/peft/tuners/loha/model.py/0
{ "file_path": "peft/src/peft/tuners/loha/model.py", "repo_id": "peft", "token_count": 2086 }
247
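For `transformers` models, the usual entry point is `get_peft_model` rather than instantiating `LoHaModel` directly. A minimal sketch (model name and target modules are illustrative):

```python
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, get_peft_model

base = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")
config = LoHaConfig(r=8, alpha=16, target_modules=["q_proj", "v_proj"])
model = get_peft_model(base, config)
model.print_trainable_parameters()
```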
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE: PEFT tests related to INC are handled under Optimum-Habana repository: # - LLMs: https://github.com/huggingface/optimum-habana/blob/main/tests/test_peft_inference.py # - Diffusers: https://github.com/huggingface/optimum-habana/blob/main/tests/test_diffusers.py from typing import Optional import torch from peft.import_utils import is_inc_available from peft.tuners.tuners_utils import BaseTunerLayer from .layer import Linear if is_inc_available(): class IncLoraLinear(Linear): def __init__( self, base_layer: torch.nn.Module, adapter_name: str, **kwargs, ): super().__init__(base_layer, adapter_name, **kwargs) def merge(self, safe_merge: bool = False, adapter_names: Optional[list[str]] = None) -> None: """ Merge the active adapter weights into the base weights Args: safe_merge (`bool`, *optional*): If True, the merge operation will be performed in a copy of the original weights and check for NaNs before merging the weights. This is useful if you want to check if the merge operation will produce NaNs. Defaults to `False`. adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ raise NotImplementedError("Merging LoRA with INC layers is not yet implemented") def unmerge(self) -> None: """ This method unmerges all merged adapter layers from the base weights. """ raise NotImplementedError("Unmerging LoRA from INC layers is not yet implemented") def dispatch_inc(target: torch.nn.Module, adapter_name: str, **kwargs): new_module = None if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if is_inc_available(): from neural_compressor.torch.algorithms.fp8_quant._quant_common.helper_modules import ( PatchedLinear, ) if isinstance(target_base_layer, PatchedLinear): new_module = IncLoraLinear(target, adapter_name, **kwargs) return new_module
peft/src/peft/tuners/lora/inc.py/0
{ "file_path": "peft/src/peft/tuners/lora/inc.py", "repo_id": "peft", "token_count": 1141 }
248
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math from typing import Any import torch import torch.nn as nn from peft.tuners.tuners_utils import BaseTunerLayer from .config import PolyConfig from .router import get_router class PolyLayer(BaseTunerLayer): # All names of layers that may contain (trainable) adapter weights adapter_layer_names = ("poly_lora_A", "poly_lora_B", "poly_router") # All names of other parameters that may contain adapter-related parameters other_param_names = ("r", "n_tasks", "n_skills", "n_splits") def __init__(self, base_layer: nn.Module, **kwargs): self.base_layer = base_layer self.r = {} self.n_tasks = {} self.n_skills = {} self.n_splits = {} self.poly_type = {} self.poly_router = nn.ModuleDict() self.poly_lora_A = nn.ParameterDict() self.poly_lora_B = nn.ParameterDict() self.kwargs = kwargs base_layer = self.get_base_layer() if isinstance(base_layer, nn.Linear): in_features, out_features = base_layer.in_features, base_layer.out_features else: raise ValueError(f"Unsupported layer type {type(base_layer)}") self.in_features = in_features self.out_features = out_features def update_layer(self, adapter_name, poly_config): if poly_config.r <= 0: raise ValueError(f"`r` should be a positive integer value but the value passed is {poly_config.r}") self.r[adapter_name] = poly_config.r self.n_tasks[adapter_name] = poly_config.n_tasks self.n_skills[adapter_name] = poly_config.n_skills self.n_splits[adapter_name] = poly_config.n_splits self.poly_type[adapter_name] = poly_config.poly_type self.poly_lora_A[adapter_name] = nn.Parameter( torch.empty( poly_config.n_splits, poly_config.n_skills, self.in_features // poly_config.n_splits, poly_config.r, ) ) self.poly_lora_B[adapter_name] = nn.Parameter( torch.empty( poly_config.n_splits, poly_config.n_skills, poly_config.r, self.out_features // poly_config.n_splits, ) ) self.poly_router[adapter_name] = get_router(poly_config) self.reset_poly_parameters(adapter_name, init_weights=poly_config.init_weights) self._move_adapter_to_device_of_base_layer(adapter_name) self.set_adapter(self.active_adapters) def reset_poly_parameters(self, adapter_name, init_weights): if adapter_name in self.poly_lora_A.keys(): # initialize A the same way as the default for nn.Linear # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L269 n_splits, n_skills, d, r = self.poly_lora_A[adapter_name].shape for skill in range(n_skills): for split in range(n_splits): param = torch.empty((r, d)) torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5)) self.poly_lora_A[adapter_name].data[split, skill, :, :] = param.T if init_weights: # initialize B to zero torch.nn.init.zeros_(self.poly_lora_B[adapter_name]) else: # initialize B the same way as the default for nn.Linear n_splits, n_skills, r, d = self.poly_lora_B[adapter_name].shape for skill in range(n_skills): for split in range(n_splits): param = torch.empty((d, r)) torch.nn.init.kaiming_uniform_(param, a=math.sqrt(5)) 
self.poly_lora_B[adapter_name].data[split, skill, :, :] = param.T # initialized router self.poly_router[adapter_name].reset() class Linear(nn.Module, PolyLayer): # Lora implemented in a dense layer def __init__( self, base_layer, adapter_name: str, poly_config: PolyConfig, **kwargs, ) -> None: super().__init__() PolyLayer.__init__(self, base_layer, **kwargs) self._active_adapter = adapter_name self.update_layer(adapter_name, poly_config) def forward(self, x: torch.Tensor, *args: Any, task_ids: torch.Tensor = None, **kwargs: Any) -> torch.Tensor: previous_dtype = x.dtype if self.disable_adapters: result = self.base_layer(x, *args, **kwargs) else: result = self.base_layer(x, *args, **kwargs) for active_adapter in self.active_adapters: if active_adapter not in self.poly_lora_A.keys(): continue r = self.r[active_adapter] poly_router = self.poly_router[active_adapter] poly_lora_A = self.poly_lora_A[active_adapter] poly_lora_B = self.poly_lora_B[active_adapter] # Combine the output of LoRAs # https://github.com/microsoft/mttl/blob/ce4ca51dbca73be656feb9b3e5233633e3c5dec7/mttl/models/poly.py#L293 mixing_weights = poly_router(task_ids=task_ids, input_ids=x) bs, n_splits, n_skills = mixing_weights.size() # A is n_splits, n_skills, D // n_splits, rank # we want bs, n_splits, D // n_splits, rank A = torch.einsum("bqs,qsdr->bqdr", (mixing_weights, poly_lora_A)) B = torch.einsum("bqs,qsrd->bqrd", (mixing_weights, poly_lora_B)) A = A.reshape(bs, self.in_features, r) B = B.transpose(1, 2).reshape(bs, r, self.out_features) x = x.to(A.dtype) result += x.bmm(A).bmm(B) / r result = result.to(previous_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "poly." + rep
peft/src/peft/tuners/poly/layer.py/0
{ "file_path": "peft/src/peft/tuners/poly/layer.py", "repo_id": "peft", "token_count": 3184 }
249
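The einsum mixing in `Linear.forward` is easiest to verify with dummy shapes. A standalone sketch (sizes are illustrative) tracing how the per-split, per-skill factors collapse into one batched low-rank update:

```python
import torch

bs, n_splits, n_skills, d_in, d_out, r = 2, 4, 3, 16, 16, 8

mixing_weights = torch.rand(bs, n_splits, n_skills)                 # router output
poly_lora_A = torch.randn(n_splits, n_skills, d_in // n_splits, r)
poly_lora_B = torch.randn(n_splits, n_skills, r, d_out // n_splits)

# Mix the skill-specific factors per split, then flatten the splits
A = torch.einsum("bqs,qsdr->bqdr", mixing_weights, poly_lora_A).reshape(bs, d_in, r)
B = torch.einsum("bqs,qsrd->bqrd", mixing_weights, poly_lora_B).transpose(1, 2).reshape(bs, r, d_out)

x = torch.randn(bs, 10, d_in)  # a batch of length-10 sequences
delta = x.bmm(A).bmm(B) / r    # the adapter's additive update
print(delta.shape)             # torch.Size([2, 10, 16])
```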
# Copyright 2025-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType RoadVariant = Literal["road_1", "road_2", "road_4"] @dataclass class RoadConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`RoadModel`]. The RoAd adapter is proposed in https://arxiv.org/pdf/2409.00119. Args: variant (Union[`RoadVariant`, `str`]): The variant of the Road model to use. It can be one of road_1, road_2, or road_4. Refer to the paper for more details. - road_1: Uses the same scale and angle for all pairs of elements. This variant has the lowest number of parameters; it stores a number of parameters equal to the output hidden size for each layer that RoAd is applied to. - road_2: Uses the same scale and angle for each element. This variant has 2x the number of parameters compared to road_1. - road_4: Uses two different scales and angles for each element. This variant has 4x the number of parameters compared to road_1. group_size (`int`): Group size defines how elements are grouped together into 2D vectors for rotation. Within each group element 0 is paired with element group_size/2, then element 1 is paired with element group_size/2+1 and so on. This has no effect on the model performance, since elements are unordered, however it has some effect on inference speed when used in e.g. vLLM. For best speed a group size of at least 32 or 64 (the default) is recommended. Note that the model hidden size (or hidden size per partition when used with tensor parallelism) must be divisible by group_size, so for very small models you might need to reduce this parameter. init_weights (`bool`): Whether to perform initialization of RoAd weights. target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen (if the model is a PreTrainedModel, the output layer excluded). If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. modules_to_save (`List[str]`): List of modules apart from Road layers to be set as trainable and saved in the final checkpoint. """ variant: Union[str, RoadVariant] = field( default="road_1", metadata={"help": ("Variant of the Road model to use.")}, ) group_size: int = field( default=64, metadata={ "help": ( "Group size defines how elements are grouped together into 2D vectors for rotation. 
" "Within each group element 0 is paired with element group_size/2, " "then element 1 is paired with element group_size/2+1 and so on. " "This has no effect on the model performance, since elements are unordered, " "however it has some effect on inference speed when used in e.g. VLLM. " "For best speed group size of at least 64 is recommended. " "Note that model hidden size (or hidden size per partition when used with tensor parallelism) " "must be divisible by group_size, so for very small models you might need to reduce this parameter." ) }, ) init_weights: bool = field( default=True, metadata={ "help": ( "Whether to initialize the weights of the RoAd layers with their default initialization. Don't change " "this setting, except if you know exactly what you're doing." ), }, ) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with Road." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D " "(if the model is a PreTrainedModel, the output layer excluded)." "If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." ), }, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": ( "List of modules apart from RoAd layers to be set as trainable and saved in the final checkpoint. For" " example, in Sequence Classification or Token Classification tasks, the final layer" " `classifier/score` are randomly initialized and as such need to be trainable and saved." ) }, ) def __post_init__(self): super().__post_init__() self.peft_type = PeftType.ROAD self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) if self.variant not in ["road_1", "road_2", "road_4"]: raise ValueError(f"Invalid variant {self.variant} specified. Please choose from road_1, road_2 or road_4") if self.group_size <= 0 or self.group_size % 2 != 0: raise ValueError(f"The group_size must be divisible by 2 when using RoadLayer, but got {self.group_size}.")
peft/src/peft/tuners/road/config.py/0
{ "file_path": "peft/src/peft/tuners/road/config.py", "repo_id": "peft", "token_count": 2480 }
250
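The `group_size` help text above fixes a deterministic pairing scheme, which is easy to see in isolation. Below is a short, hypothetical sketch (not part of the source file) of that pairing plus a plausible `RoadConfig` instantiation; the target module names are illustrative and depend on the base model.

```py
from peft import RoadConfig

# Pairing per the help text: within a group, element i rotates together with
# element i + group_size // 2.
group_size = 8
pairs = [(i, i + group_size // 2) for i in range(group_size // 2)]
print(pairs)  # [(0, 4), (1, 5), (2, 6), (3, 7)]

# Illustrative config: road_2 stores 2x the parameters of road_1 per layer.
config = RoadConfig(variant="road_2", group_size=64, target_modules=["q_proj", "v_proj"])
```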
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from dataclasses import asdict from enum import Enum from typing import Optional import torch import torch.nn as nn from tqdm import tqdm from transformers.pytorch_utils import Conv1D from peft.tuners.tuners_utils import BaseTuner, BaseTunerLayer, check_target_module_exists from peft.utils import TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING, ModulesToSaveWrapper, _get_submodules from .config import VBLoRAConfig from .layer import Linear, VBLoRALayer class VBLoRAModel(BaseTuner): """ Creates VBLoRA model from a pretrained transformers model. The method is described in detail in https://huggingface.co/papers/2405.15179. Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. config ([`VBLoRAConfig`]): The configuration of the VBLoRA model. adapter_name (`str`): The name of the adapter, defaults to `"default"`. low_cpu_mem_usage (`bool`, `optional`, defaults to `False`): Create empty adapter weights on meta device. Useful to speed up the loading process. Returns: `torch.nn.Module`: The VBLoRA model. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import VBLoRAConfig, get_peft_model >>> base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") >>> config = VBLoRAConfig( ... task_type="SEQ_CLS", ... r=4, ... target_modules=["fc1", "fc2", "k_proj", "out_proj", "q_proj", "v_proj"], ... num_vectors=60, ... vector_length=256, ... save_only_topk_weights=True, ... ) >>> model = get_peft_model(base_model, config) ``` **Attributes**: - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted. - **peft_config** ([`VBLoRAConfig`]): The configuration of the VBLoRAConfig model. """ prefix: str = "vblora_" def _init_vblora_vector_bank(self, config: VBLoRAConfig, adapter_name: str) -> None: vblora_vector_bank = torch.zeros(config.num_vectors, config.vector_length) torch.nn.init.uniform_(vblora_vector_bank, -config.init_vector_bank_bound, config.init_vector_bank_bound) self.vblora_vector_bank[adapter_name] = vblora_vector_bank def _pre_injection_hook(self, model: nn.Module, config: VBLoRAConfig, adapter_name: str) -> None: self.vblora_vector_bank = nn.ParameterDict({}) def _check_new_adapter_config(self, config: VBLoRAConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ # the below todo is copied from LoRA # TODO: there should be a check if any of the existing adapters actually has bias != "none", or else the check # does not fully correspond to the error message. if (len(self.peft_config) > 1) and (config.bias != "none"): raise ValueError( f"{self.__class__.__name__} supports only 1 adapter with bias. When using multiple adapters, " "set bias to 'none' for all adapters." 
) @staticmethod def _check_target_module_exists(vblora_config, key): return check_target_module_exists(vblora_config, key) def _create_and_replace( self, vblora_config, adapter_name, target, target_name, parent, current_key, ): if current_key is None: raise ValueError("Current Key shouldn't be `None`") bias = hasattr(target, "bias") and target.bias is not None kwargs = { "fan_in_fan_out": vblora_config.fan_in_fan_out, "bias": bias, } self._init_vblora_vector_bank(vblora_config, adapter_name) # TODO: add quantization support if isinstance(target, Linear): target.update_layer( adapter_name=adapter_name, vblora_vector_bank=self.vblora_vector_bank, r=vblora_config.r, topk=vblora_config.topk, num_vectors=vblora_config.num_vectors, vector_length=vblora_config.vector_length, vblora_dropout=vblora_config.vblora_dropout, init_logits_std=vblora_config.init_logits_std, ) else: new_module = self._create_new_module( vblora_config=vblora_config, vblora_vector_bank=self.vblora_vector_bank, adapter_name=adapter_name, target=target, **kwargs, ) if adapter_name not in self.active_adapter: # adding an additional adapter: it is not automatically trainable new_module.requires_grad_(False) self._replace_module(parent, target_name, new_module, target) @staticmethod def _replace_module(parent, child_name, new_module, child): setattr(parent, child_name, new_module) # It's not necessary to set requires_grad here, as that is handled by # _mark_only_adapters_as_trainable # child layer wraps the original module, unpack it if hasattr(child, "base_layer"): child = child.base_layer if not hasattr(new_module, "base_layer"): new_module.weight = child.weight if hasattr(child, "bias"): new_module.bias = child.bias if getattr(child, "state", None) is not None: if hasattr(new_module, "base_layer"): new_module.base_layer.state = child.state else: new_module.state = child.state new_module.to(child.weight.device) meta = torch.device("meta") # dispatch to correct device for name, module in new_module.named_modules(): if "vblora_" in name: if not any(p.device == meta for p in module.parameters()): module.to(child.weight.device) def _mark_only_adapters_as_trainable(self, model: nn.Module) -> None: for n, p in model.named_parameters(): if self.prefix not in n: p.requires_grad = False for active_adapter in self.active_adapters: bias = self.peft_config[active_adapter].bias if bias == "none": continue if bias == "all": for n, p in model.named_parameters(): if "bias" in n: p.requires_grad = True elif bias == "vblora_only": for m in model.modules(): if isinstance(m, VBLoRALayer) and hasattr(m, "bias") and m.bias is not None: m.bias.requires_grad = True else: raise NotImplementedError(f"Requested bias: {bias}, is not implemented.") @staticmethod def _create_new_module(vblora_config, vblora_vector_bank, adapter_name, target, **kwargs): if isinstance(target, BaseTunerLayer): target_base_layer = target.get_base_layer() else: target_base_layer = target if isinstance(target_base_layer, torch.nn.Linear): if kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. " "Setting fan_in_fan_out to False." ) kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = False elif isinstance(target_base_layer, Conv1D): kwargs["is_target_conv_1d_layer"] = True if not kwargs["fan_in_fan_out"]: warnings.warn( "fan_in_fan_out is set to False but the target module is `Conv1D`. Setting fan_in_fan_out to True." 
) kwargs["fan_in_fan_out"] = vblora_config.fan_in_fan_out = True else: raise ValueError( f"Target module {target} is not supported. Currently, only the following modules are supported: " "`torch.nn.Linear`, `transformers.pytorch_utils.Conv1D`." ) new_module = Linear( base_layer=target, vblora_vector_bank=vblora_vector_bank, adapter_name=adapter_name, r=vblora_config.r, num_vectors=vblora_config.num_vectors, vector_length=vblora_config.vector_length, topk=vblora_config.topk, vblora_dropout=vblora_config.vblora_dropout, init_logits_std=vblora_config.init_logits_std, **kwargs, ) return new_module def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: if name == "model": # see #1892: prevent infinite recursion if class is not initialized raise return getattr(self.model, name) def get_peft_config_as_dict(self, inference: bool = False): config_dict = {} for key, value in self.peft_config.items(): config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()} if inference: config["inference_mode"] = True config_dict[key] = config return config def _set_adapter_layers(self, enabled: bool = True) -> None: for module in self.model.modules(): if isinstance(module, (BaseTunerLayer, ModulesToSaveWrapper)): module.enable_adapters(enabled) def enable_adapter_layers(self) -> None: """Enable all adapters. Call this if you have previously disabled all adapters and want to re-enable them. """ self._set_adapter_layers(enabled=True) def disable_adapter_layers(self) -> None: """Disable all adapters. When disabling all adapters, the model output corresponds to the output of the base model. """ for active_adapter in self.active_adapters: val = self.peft_config[active_adapter].bias if val != "none": msg = ( f"Careful, disabling adapter layers with bias configured to be '{val}' does not produce the same " "output as the base model would without adaption." ) warnings.warn(msg) self._set_adapter_layers(enabled=False) def set_adapter(self, adapter_name: str | list[str]) -> None: """Set the active adapter(s). Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `list[str]`): Name of the adapter(s) to be activated. """ for module in self.model.modules(): if isinstance(module, VBLoRALayer): if module.merged: warnings.warn("Adapter cannot be set when the model is merged. 
Unmerging the model first.") module.unmerge() module.set_adapter(adapter_name) self.active_adapter = adapter_name @staticmethod def _prepare_adapter_config(peft_config, model_config): if peft_config.target_modules is None: if model_config["model_type"] not in TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING: raise ValueError("Please specify `target_modules` in `peft_config`") peft_config.target_modules = set( TRANSFORMERS_MODELS_TO_VBLORA_TARGET_MODULES_MAPPING[model_config["model_type"]] ) return peft_config def _unload_and_optionally_merge( self, merge=True, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None, ): key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] desc = "Unloading " + ("and merging " if merge else "") + "model" for key in tqdm(key_list, disable=not progressbar, desc=desc): try: parent, target, target_name = _get_submodules(self.model, key) except AttributeError: continue if hasattr(target, "base_layer"): if merge: target.merge(safe_merge=safe_merge, adapter_names=adapter_names) self._replace_module(parent, target_name, target.get_base_layer(), target) elif isinstance(target, ModulesToSaveWrapper): # save any additional trainable modules part of `modules_to_save` setattr(parent, target_name, target.modules_to_save[target.active_adapter]) return self.model def delete_adapter(self, adapter_name: str) -> None: """ Deletes an existing adapter. Args: adapter_name (str): Name of the adapter to be deleted. """ if adapter_name not in list(self.peft_config.keys()): raise ValueError(f"Adapter {adapter_name} does not exist") del self.peft_config[adapter_name] key_list = [key for key, _ in self.model.named_modules() if self.prefix not in key] new_adapter = None for key in key_list: _, target, _ = _get_submodules(self.model, key) if isinstance(target, VBLoRALayer): target.delete_adapter(adapter_name) if new_adapter is None: new_adapter = target.active_adapter[:] self.active_adapter = new_adapter or [] self._delete_auxiliary_adapter(adapter_name, new_active_adapters=new_adapter) def merge_and_unload( self, progressbar: bool = False, safe_merge: bool = False, adapter_names: Optional[list[str]] = None ) -> torch.nn.Module: r""" This method merges the VBLoRA layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`list[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModel >>> base_model = AutoModelForCausalLM.from_pretrained("tiiuae/falcon-40b") >>> peft_model_id = "smangrul/falcon-40B-int4-peft-lora-sfttrainer-sample" >>> model = PeftModel.from_pretrained(base_model, peft_model_id) >>> merged_model = model.merge_and_unload() ``` """ return self._unload_and_optionally_merge( progressbar=progressbar, safe_merge=safe_merge, adapter_names=adapter_names ) def unload(self): """ Gets back the base model by removing all the VBLoRA modules without merging. This gives back the original base model. 
""" return self._unload_and_optionally_merge(merge=False) def get_nb_savable_parameters(self, adapter="default") -> tuple[int, int]: r""" Returns the number of savable VB-LoRA parameters and other savable parameters. """ logits_params = 0 vector_bank_params = 0 other_params = 0 for name, param in self.named_parameters(): if "vblora_logits" in name: logits_params += param.numel() elif "vblora_vector_bank" in name: vector_bank_params += param.numel() elif param.requires_grad: other_params += param.numel() if self.peft_config[adapter].save_only_topk_weights: num_vectors = self.peft_config[adapter].num_vectors factor = 1 # factor to count float32-equivalent parameters if num_vectors < 2**8: factor = 0.25 elif num_vectors < 2**15: factor = 0.5 elif num_vectors < 2**31: factor = 1 else: factor = 2 topk_weight_params = ( logits_params / self.peft_config[adapter].num_vectors * (self.peft_config[adapter].topk - 1) ) topk_indices_params = ( logits_params / self.peft_config[adapter].num_vectors * self.peft_config[adapter].topk * factor ) vblora_params = int(vector_bank_params + topk_weight_params + topk_indices_params) else: vblora_params = vector_bank_params + logits_params return vblora_params, other_params def print_savable_parameters(self) -> None: r""" Prints the number of savable VB-LoRA parameters and total savable parameters. """ vblora_params, other_params = self.get_nb_savable_parameters() print( f"VB-LoRA params to-be-saved (float32-equivalent): {vblora_params:,d} " f"|| total params to-be-saved: {(vblora_params + other_params):,d}" )
peft/src/peft/tuners/vblora/model.py/0
{ "file_path": "peft/src/peft/tuners/vblora/model.py", "repo_id": "peft", "token_count": 8433 }
251
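The `factor` branches in `get_nb_savable_parameters` above encode a simple idea: with `save_only_topk_weights`, the top-k indices are stored in the smallest integer dtype that can address `num_vectors`, and the factor expresses that dtype's size relative to a 4-byte float32. A standalone sketch of that accounting, not part of the source file:

```py
# Standalone sketch of the float32-equivalent factor used above.
def index_factor(num_vectors: int) -> float:
    if num_vectors < 2**8:
        return 0.25  # index fits in 1 byte = 1/4 of a float32
    elif num_vectors < 2**15:
        return 0.5   # 2-byte index
    elif num_vectors < 2**31:
        return 1.0   # 4-byte index
    return 2.0       # 8-byte index

print(index_factor(60))  # 0.25 for the num_vectors=60 example in the docstring
```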
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Reference code: https://github.com/yxli2123/LoftQ/blob/main/utils.py # Reference paper: https://huggingface.co/papers/2310.08659 from __future__ import annotations import logging import os from typing import Callable, Optional, Union import torch from accelerate.utils.memory import clear_device_cache from huggingface_hub import snapshot_download from huggingface_hub.errors import HFValidationError, LocalEntryNotFoundError from safetensors import SafetensorError, safe_open from transformers.utils import cached_file from transformers.utils.hub import get_checkpoint_shard_files from peft.import_utils import is_bnb_4bit_available, is_bnb_available, is_xpu_available class NFQuantizer: def __init__(self, num_bits=2, device="cuda", method="normal", block_size=64, *args, **kwargs): super().__init__(*args, **kwargs) self.num_bits = num_bits self.device = device self.method = method self.block_size = block_size if self.method == "normal": self.norm_lookup_table = self.create_normal_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) elif self.method == "uniform": self.norm_lookup_table = self.create_uniform_map(num_bits=self.num_bits) self.norm_lookup_table = self.norm_lookup_table.to(device) else: raise NotImplementedError("Other quantization methods not supported yet.") @staticmethod def create_uniform_map(symmetric=False, num_bits=4): if symmetric: # print("symmetric uniform quantization") negative = torch.linspace(-1, 0, 2 ** (num_bits - 1)) positive = torch.linspace(0, 1, 2 ** (num_bits - 1)) table = torch.cat([negative, positive[1:]]) else: # print("asymmetric uniform quantization") table = torch.linspace(-1, 1, 2**num_bits) return table @staticmethod def create_normal_map(offset=0.9677083, symmetric=False, num_bits=2): try: from scipy.stats import norm except ImportError: raise ImportError("The required package 'scipy' is not installed. 
Please install it to continue.") variations = 2**num_bits if symmetric: v = norm.ppf(torch.linspace(1 - offset, offset, variations + 1)).tolist() values = [] for index in range(len(v) - 1): values.append(0.5 * v[index] + 0.5 * v[index + 1]) v = values else: # one more positive value, this is an asymmetric type v1 = norm.ppf(torch.linspace(offset, 0.5, variations // 2 + 1)[:-1]).tolist() v2 = [0] v3 = (-norm.ppf(torch.linspace(offset, 0.5, variations // 2)[:-1])).tolist() v = v1 + v2 + v3 values = torch.Tensor(v) values = values.sort().values values /= values.max() return values def quantize_tensor(self, weight): max_abs = torch.abs(weight).max() weight_normed = weight / max_abs weight_normed_expanded = weight_normed.unsqueeze(-1) # Reshape L to have the same number of dimensions as X_expanded L_reshaped = torch.tensor(self.norm_lookup_table).reshape(1, -1) # Calculate the absolute difference between X_expanded and L_reshaped abs_diff = torch.abs(weight_normed_expanded - L_reshaped) # Find the index of the minimum absolute difference for each element qweight = torch.argmin(abs_diff, dim=-1) return qweight, max_abs def dequantize_tensor(self, qweight, max_abs): qweight_flatten = qweight.flatten() weight_normed = self.norm_lookup_table[qweight_flatten] weight = weight_normed * max_abs weight = weight.reshape(qweight.shape) return weight def quantize_block(self, weight): if len(weight.shape) != 2: raise ValueError(f"Only 2D matrices are supported, but your input has {len(weight.shape)} dimensions.") if weight.shape[0] * weight.shape[1] % self.block_size != 0: raise ValueError( f"Weight with shape ({weight.shape[0]} x {weight.shape[1]}) " f"is not divisible by block size {self.block_size}." ) M, N = weight.shape device = weight.device # Quantization weight_flatten = weight.flatten() # (M*N, ) weight_block = weight_flatten.reshape(-1, self.block_size) # (L, B), L = M * N / B if self.method == "normal": weight_max = weight_block.abs().max(dim=-1)[0] # (L, 1) elif self.method == "uniform": weight_max = weight_block.mean(dim=-1) + 2.5 * weight_block.std(dim=-1) else: raise NotImplementedError("Method not supported yet.") weight_max = weight_max.unsqueeze(-1) weight_divabs = weight_block / weight_max # (L, B) weight_divabs = weight_divabs.unsqueeze(-1) # (L, B, 1) L_reshaped = self.norm_lookup_table.reshape(1, -1) # (1, 2**K) abs_diff = torch.abs(weight_divabs - L_reshaped) # (L, B, 2**K) qweight = torch.argmin(abs_diff, dim=-1) # (L, B) # Pack multiple k-bit into uint8 qweight = qweight.reshape(-1, 8 // self.num_bits) qweight_pack = torch.zeros((M * N // 8 * self.num_bits, 1), dtype=torch.uint8, device=device) # data format example: # [1, 0, 3, 2] or [01, 00, 11, 10] -> [10110001], LIFO for i in range(8 // self.num_bits): qweight[:, i] = qweight[:, i] << i * self.num_bits qweight_pack[:, 0] |= qweight[:, i] return qweight_pack, weight_max, weight.shape def dequantize_block(self, qweight, weight_max, weight_shape): # unpack weight device = qweight.device weight = torch.zeros((qweight.shape[0], 8 // self.num_bits), dtype=torch.float32, device=device) for i in range(8 // self.num_bits): lookup_table_idx = qweight.to(torch.long) % 2**self.num_bits # get the rightmost num_bits bits lookup_table_idx = lookup_table_idx.to(torch.long) weight[:, i] = self.norm_lookup_table[lookup_table_idx].squeeze() qweight = qweight >> self.num_bits # shift out the bits that were just read weight_block = weight.reshape(-1, self.block_size) weight = weight_block * weight_max weight = weight.reshape(weight_shape) return weight def
_low_rank_decomposition(weight, reduced_rank=32): """ :param weight: The matrix to decompose, of shape (H, W) :param reduced_rank: the final rank :return: dict with the low-rank factors L and R plus the full SVD components U, S, Vh and the reduced_rank """ matrix_dimension = len(weight.size()) if matrix_dimension != 2: raise ValueError(f"Only 2D matrices are supported, but your input has {matrix_dimension} dimensions.") # Use SVD to decompose a matrix, default full_matrices is False to save parameters U, S, Vh = torch.linalg.svd(weight, full_matrices=False) L = U @ (torch.sqrt(torch.diag(S)[:, 0:reduced_rank])) R = torch.sqrt(torch.diag(S)[0:reduced_rank, :]) @ Vh return {"L": L, "R": R, "U": U, "S": S, "Vh": Vh, "reduced_rank": reduced_rank} @torch.no_grad() def loftq_init(weight: Union[torch.Tensor, torch.nn.Parameter], num_bits: int, reduced_rank: int, num_iter=1): if is_bnb_available(): import bitsandbytes as bnb else: raise ValueError("bitsandbytes is not available, please install it to use LoftQ.") if num_bits not in [2, 4, 8]: raise ValueError("Only 2, 4 and 8 bit quantization is supported") if num_iter <= 0: raise ValueError("Number of iterations must be greater than 0") out_feature, in_feature = weight.size() device = weight.device dtype = weight.dtype logging.info( f"Weight: ({out_feature}, {in_feature}) | Rank: {reduced_rank} | Num Iter: {num_iter} | Num Bits: {num_bits}" ) if not is_bnb_4bit_available() or num_bits in [2, 8]: quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64) compute_device = device else: compute_device = "xpu" if is_xpu_available() else "cuda" weight = weight.to(device=compute_device, dtype=torch.float32) res = weight.clone() for i in range(num_iter): clear_device_cache() # Quantization if num_bits == 4 and is_bnb_4bit_available(): qweight = bnb.nn.Params4bit( res.to("cpu"), requires_grad=False, compress_statistics=False, quant_type="nf4" ).to(compute_device) dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state) else: quantized_weight, max_abs, shape = quantizer.quantize_block(res) dequantized_weight = quantizer.dequantize_block(quantized_weight, max_abs, shape) res = weight - dequantized_weight # Decompose the residual by SVD output = _low_rank_decomposition(res, reduced_rank=reduced_rank) L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"] res = weight - torch.mm(L, R) lora_A, lora_B = R, L return dequantized_weight.to(device=device, dtype=dtype), lora_A, lora_B @torch.no_grad() def _loftq_init_new(qweight, weight, num_bits: int, reduced_rank: int): import bitsandbytes as bnb if num_bits != 4: raise ValueError("Only 4 bit quantization supported at the moment.") if not is_bnb_4bit_available(): raise ValueError("bitsandbytes 4bit quantization is not available.") compute_device = "xpu" if is_xpu_available() else "cuda" dequantized_weight = bnb.functional.dequantize_4bit(qweight.data, qweight.quant_state) weight = weight.to(device=compute_device, dtype=torch.float32) residual = weight - dequantized_weight clear_device_cache() # Decompose the residual by SVD output = _low_rank_decomposition(residual, reduced_rank=reduced_rank) L, R, reduced_rank = output["L"], output["R"], output["reduced_rank"] return R, L class _SafetensorLoader: """ Simple utility class that loads tensors with safetensors from a single file or sharded files. Takes care of file name normalization etc.
""" def __init__(self, peft_model, model_path): if model_path is None: try: model_path = snapshot_download(peft_model.base_model.config._name_or_path, local_files_only=True) except (AttributeError, HFValidationError) as exc: raise ValueError( "The provided model does not appear to be a transformers model or is a local model. In this case, " "you must pass the model_path argument that points to the safetensors file." ) from exc except LocalEntryNotFoundError as exc: raise ValueError( "The model.safetensors file must be present on disk, but it could not be found." ) from exc suffix = "model.safetensors" if not model_path.endswith(suffix): model_path = os.path.join(model_path, suffix) self.model_path = model_path self.base_model_prefix = getattr(peft_model.get_base_model(), "base_model_prefix", None) self.prefix = "base_model.model." self.is_sharded = False self.weight_map = None if not os.path.exists(model_path): # check if the file is sharded par_dir = model_path.rpartition(os.path.sep)[0] try: resolved_archive_file, sharded_metadata = get_checkpoint_shard_files( par_dir, cached_file(par_dir, "model.safetensors.index.json") ) except OSError as exc: raise FileNotFoundError( f"Could not find file for {model_path}, ensure that there is a (sharded) safetensors file of the model." ) from exc self.is_sharded = True # maps from 'model-X-of-Y.safetensors' to full file path file_map = {k.rpartition(os.path.sep)[-1]: k for k in resolved_archive_file} self.weight_map = {k: file_map[v] for k, v in sharded_metadata["weight_map"].items()} def get_tensor(self, name): if not self.is_sharded: file_path = self.model_path else: file_path = self.weight_map[name] with safe_open(file_path, framework="pt", device="cpu") as f: try: tensor = f.get_tensor(name) except SafetensorError as exc: # no matching key found, we probably need to remove the base model prefix if self.base_model_prefix: # remove 1 extra character for "." name = name[len(self.base_model_prefix) + 1 :] tensor = f.get_tensor(name) else: raise exc return tensor @torch.no_grad() def replace_lora_weights_loftq( peft_model, model_path: Optional[str] = None, adapter_name: str = "default", callback: Optional[Callable[[torch.nn.Module, str], bool]] = None, ): """ Replace the LoRA weights of a model quantized with bitsandbytes, using the LoftQ technique. The replacement is done on the fly by loading in the non-quantized weights from a locally stored safetensors model file and initializing the LoRA weights such that the quantization error between the original and quantized weights is minimized. As lazy loading is not possible with pickle, normal PyTorch checkpoint files cannot be supported. Depending on the model size, calling this function may take some time to finish. Args: peft_model (`PeftModel`): The model to replace the weights of. Must be a quantized PEFT model with LoRA layers. model_path (`Optional[str]`): The path to the model safetensors file. If the model is a Hugging Face model, this will be inferred from the model's config. Otherwise, it must be provided. adapter_name (`str`): The name of the adapter to replace the weights of. The default adapter name is "default". callback (`Optional[Callable[[PeftModel, str], bool]]`): A callback function that will be called after each module is replaced. The callback function should take the model and the name of the current module as input and return a boolean indicating whether the replacement should be kept. If the callback returns False, the replacement will be rolled back. 
This can be very useful to confirm that the LoftQ initialization actually decreases the quantization error of the model. As an example, this callback could generate logits for a given input and compare them with the logits from the original, non-quantized model with the same input, and only return `True` if there is an improvement. As this is a greedy optimization, it's possible that calling this function multiple times yields incremental improvements. """ if not is_bnb_4bit_available(): raise ValueError("bitsandbytes must be installed and the model must be quantized in 4bits.") from peft.tuners.lora import Linear4bit # model_path = _check_model_path_loftq(model_path, peft_model) prefix = "base_model.model." any_match = False safetensor_loader = _SafetensorLoader(peft_model, model_path) # if too slow, consider adding tqdm as an option for name, module in peft_model.named_modules(): if not isinstance(module, Linear4bit): continue if not name.startswith(prefix): raise TypeError("The passed model does not appear to be a valid PeftModel") any_match = True name = name[len(prefix) :] tensor = safetensor_loader.get_tensor(name + ".weight") reduced_rank = module.r[adapter_name] lora_A, lora_B = _loftq_init_new(module.weight, tensor, num_bits=4, reduced_rank=reduced_rank) if not callback: module.lora_A[adapter_name].weight.data = lora_A module.lora_B[adapter_name].weight.data = lora_B continue lora_A_before = module.lora_A[adapter_name].weight.data lora_B_before = module.lora_B[adapter_name].weight.data module.lora_A[adapter_name].weight.data = lora_A module.lora_B[adapter_name].weight.data = lora_B should_replace = callback(peft_model, name) if not should_replace: # roll back module.lora_A[adapter_name].weight.data = lora_A_before module.lora_B[adapter_name].weight.data = lora_B_before del lora_A_before, lora_B_before if not any_match: raise ValueError("No bnb LoRA module found on the model")
peft/src/peft/utils/loftq_utils.py/0
{ "file_path": "peft/src/peft/utils/loftq_utils.py", "repo_id": "peft", "token_count": 7291 }
252
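The heart of `loftq_init` above is an alternating loop: quantize the current target, subtract the dequantized result from the original weight, approximate the error with a truncated SVD, and repeat. The toy sketch below mirrors only that structure; `fake_quant` is a crude stand-in for the NF4 quantizer so the snippet runs with plain PyTorch and is not the real implementation:

```py
import torch

def fake_quant(w):
    # stand-in for NF4 quantize/dequantize: one magnitude per tensor (not the real thing)
    return torch.sign(w) * w.abs().mean()

weight = torch.randn(64, 64)
res, rank = weight.clone(), 8
for _ in range(3):  # num_iter
    dq = fake_quant(res)                           # dequantized quantized weight
    err = weight - dq                              # quantization error
    U, S, Vh = torch.linalg.svd(err, full_matrices=False)
    L = U[:, :rank] * S[:rank].sqrt()              # plays the role of lora_B
    R = S[:rank].sqrt().unsqueeze(1) * Vh[:rank]   # plays the role of lora_A
    res = weight - L @ R                           # target for the next quantization pass
print(torch.dist(weight, dq + L @ R))              # residual error after LoftQ-style init
```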
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import json import os import pickle import tempfile import warnings import pytest from peft import ( AdaLoraConfig, AdaptionPromptConfig, BOFTConfig, FourierFTConfig, HRAConfig, IA3Config, LNTuningConfig, LoHaConfig, LoKrConfig, LoraConfig, MultitaskPromptTuningConfig, OFTConfig, PeftConfig, PeftType, PolyConfig, PrefixTuningConfig, PromptEncoder, PromptEncoderConfig, PromptTuningConfig, RoadConfig, TaskType, VBLoRAConfig, VeraConfig, ) PEFT_MODELS_TO_TEST = [("peft-internal-testing/tiny-opt-lora-revision", "test")] # Config classes and their mandatory parameters ALL_CONFIG_CLASSES = ( (AdaLoraConfig, {"total_step": 1}), (AdaptionPromptConfig, {}), (BOFTConfig, {}), (FourierFTConfig, {}), (HRAConfig, {}), (IA3Config, {}), (LNTuningConfig, {}), (LoHaConfig, {}), (LoKrConfig, {}), (LoraConfig, {}), (MultitaskPromptTuningConfig, {}), (PolyConfig, {}), (PrefixTuningConfig, {}), (PromptEncoderConfig, {}), (PromptTuningConfig, {}), (RoadConfig, {}), (VeraConfig, {}), (VBLoRAConfig, {}), ) class TestPeftConfig: @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_methods(self, config_class, mandatory_kwargs): r""" Test if all configs have the expected methods. Here we test - to_dict - save_pretrained - from_pretrained - from_json_file """ # test if all configs have the expected methods config = config_class(**mandatory_kwargs) assert hasattr(config, "to_dict") assert hasattr(config, "save_pretrained") assert hasattr(config, "from_pretrained") assert hasattr(config, "from_json_file") @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) @pytest.mark.parametrize("valid_task_type", list(TaskType) + [None]) def test_valid_task_type(self, config_class, mandatory_kwargs, valid_task_type): r""" Test if all configs work correctly for all valid task types """ config_class(task_type=valid_task_type, **mandatory_kwargs) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_invalid_task_type(self, config_class, mandatory_kwargs): r""" Test if all configs correctly raise the defined error message for invalid task types. """ invalid_task_type = "invalid-task-type" with pytest.raises( ValueError, match=f"Invalid task type: '{invalid_task_type}'. 
Must be one of the following task types: {', '.join(TaskType)}.", ): config_class(task_type=invalid_task_type, **mandatory_kwargs) def test_from_peft_type(self): r""" Test if the config is correctly loaded using: - from_peft_type """ from peft.mapping import PEFT_TYPE_TO_CONFIG_MAPPING for peft_type in PeftType: expected_cls = PEFT_TYPE_TO_CONFIG_MAPPING[peft_type] mandatory_config_kwargs = {} if expected_cls == AdaLoraConfig: mandatory_config_kwargs = {"total_step": 1} config = PeftConfig.from_peft_type(peft_type=peft_type, **mandatory_config_kwargs) assert type(config) is expected_cls @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_from_pretrained(self, config_class, mandatory_kwargs): r""" Test if the config is correctly loaded using: - from_pretrained """ for model_name, revision in PEFT_MODELS_TO_TEST: # Test we can load config from delta config_class.from_pretrained(model_name, revision=revision) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_save_pretrained(self, config_class, mandatory_kwargs): r""" Test if the config is correctly saved and loaded using - save_pretrained """ config = config_class(**mandatory_kwargs) with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) assert config.to_dict() == config_from_pretrained.to_dict() @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_from_json_file(self, config_class, mandatory_kwargs): config = config_class(**mandatory_kwargs) with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_path = os.path.join(tmp_dirname, "adapter_config.json") config_from_json = config_class.from_json_file(config_path) assert config.to_dict() == config_from_json # Also test with a runtime_config entry -- they should be ignored, even if they # were accidentally saved to disk config_from_json["runtime_config"] = {"ephemeral_gpu_offload": True} json.dump(config_from_json, open(config_path, "w")) config_from_json = config_class.from_json_file(config_path) assert config.to_dict() == config_from_json @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_to_dict(self, config_class, mandatory_kwargs): r""" Test if the config can be correctly converted to a dict using: - to_dict """ config = config_class(**mandatory_kwargs) assert isinstance(config.to_dict(), dict) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_from_pretrained_cache_dir(self, config_class, mandatory_kwargs): r""" Test if the config is correctly loaded with extra kwargs """ with tempfile.TemporaryDirectory() as tmp_dirname: for model_name, revision in PEFT_MODELS_TO_TEST: # Test we can load config from delta config_class.from_pretrained(model_name, revision=revision, cache_dir=tmp_dirname) def test_from_pretrained_cache_dir_remote(self): r""" Test if the config is correctly loaded with a checkpoint from the hub """ with tempfile.TemporaryDirectory() as tmp_dirname: PeftConfig.from_pretrained("ybelkada/test-st-lora", cache_dir=tmp_dirname) assert "models--ybelkada--test-st-lora" in os.listdir(tmp_dirname) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_save_pretrained_with_runtime_config(self, config_class, mandatory_kwargs): r""" Test if the config correctly removes runtime config when saving """ with tempfile.TemporaryDirectory() as 
tmp_dirname: for model_name, revision in PEFT_MODELS_TO_TEST: cfg = config_class.from_pretrained(model_name, revision=revision) # NOTE: cfg is always a LoraConfig here, because the configuration of the loaded model was a LoRA. # Hence we can expect a runtime_config to exist regardless of config_class. cfg.runtime_config.ephemeral_gpu_offload = True cfg.save_pretrained(tmp_dirname) cfg = config_class.from_pretrained(tmp_dirname) assert not cfg.runtime_config.ephemeral_gpu_offload @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_set_attributes(self, config_class, mandatory_kwargs): # manually set attributes and check if they are correctly written config = config_class(peft_type="test", **mandatory_kwargs) # save pretrained with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) assert config.to_dict() == config_from_pretrained.to_dict() @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_config_copy(self, config_class, mandatory_kwargs): # see https://github.com/huggingface/peft/issues/424 config = config_class(**mandatory_kwargs) copied = copy.copy(config) assert config.to_dict() == copied.to_dict() @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_config_deepcopy(self, config_class, mandatory_kwargs): # see https://github.com/huggingface/peft/issues/424 config = config_class(**mandatory_kwargs) copied = copy.deepcopy(config) assert config.to_dict() == copied.to_dict() @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_config_pickle_roundtrip(self, config_class, mandatory_kwargs): # see https://github.com/huggingface/peft/issues/424 config = config_class(**mandatory_kwargs) copied = pickle.loads(pickle.dumps(config)) assert config.to_dict() == copied.to_dict() def test_prompt_encoder_warning_num_layers(self): # This test checks that if a prompt encoder config is created with an argument that is ignored, there should be # warning. However, there should be no warning if the default value is used. kwargs = { "num_virtual_tokens": 20, "num_transformer_submodules": 1, "token_dim": 768, "encoder_hidden_size": 768, } # there should be no warning with just default argument for encoder_num_layer config = PromptEncoderConfig(**kwargs) with warnings.catch_warnings(): PromptEncoder(config) # when changing encoder_num_layer, there should be a warning for MLP since that value is not used config = PromptEncoderConfig(encoder_num_layers=123, **kwargs) with pytest.warns(UserWarning) as record: PromptEncoder(config) expected_msg = "for MLP, the argument `encoder_num_layers` is ignored. Exactly 2 MLP layers are used." 
assert str(record.list[0].message) == expected_msg @pytest.mark.parametrize( "config_class", [LoHaConfig, LoraConfig, IA3Config, OFTConfig, BOFTConfig, HRAConfig, VBLoRAConfig] ) def test_save_pretrained_with_target_modules(self, config_class): # See #1041, #1045 config = config_class(target_modules=["a", "list"]) with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) assert config.to_dict() == config_from_pretrained.to_dict() # explicit test that target_modules should be converted to set assert isinstance(config_from_pretrained.target_modules, set) def test_regex_with_layer_indexing_lora(self): # This test checks that an error is raised if `target_modules` is a regex expression and `layers_to_transform` or # `layers_pattern` are not None invalid_config1 = {"target_modules": ".*foo", "layers_to_transform": [0]} invalid_config2 = {"target_modules": ".*foo", "layers_pattern": ["bar"]} valid_config = {"target_modules": ["foo"], "layers_pattern": ["bar"], "layers_to_transform": [0]} with pytest.raises(ValueError, match="`layers_to_transform` cannot be used when `target_modules` is a str."): LoraConfig(**invalid_config1) with pytest.raises(ValueError, match="`layers_pattern` cannot be used when `target_modules` is a str."): LoraConfig(**invalid_config2) # should run without errors LoraConfig(**valid_config) def test_ia3_is_feedforward_subset_invalid_config(self): # This test checks that the IA3 config raises a value error if the feedforward_modules argument # is not a subset of the target_modules argument # an example invalid config invalid_config = {"target_modules": ["k", "v"], "feedforward_modules": ["q"]} with pytest.raises(ValueError, match="^`feedforward_modules` should be a subset of `target_modules`$"): IA3Config(**invalid_config) def test_ia3_is_feedforward_subset_valid_config(self): # This test checks that the IA3 config is created without errors with valid arguments. # feedforward_modules should be a subset of target_modules if both are lists # an example valid config with regex expressions. valid_config_regex_exp = { "target_modules": ".*.(SelfAttention|EncDecAttention|DenseReluDense).*(q|v|wo)$", "feedforward_modules": ".*.DenseReluDense.wo$", } # an example valid config with module lists. valid_config_list = {"target_modules": ["k", "v", "wo"], "feedforward_modules": ["wo"]} # should run without errors IA3Config(**valid_config_regex_exp) IA3Config(**valid_config_list) def test_adalora_config_r_warning(self): # This test checks that a warning is raised when r is set other than default in AdaLoraConfig # No warning should be raised when initializing AdaLoraConfig with default values. 
kwargs = {"peft_type": "ADALORA", "task_type": "SEQ_2_SEQ_LM", "init_r": 12, "lora_alpha": 32, "total_step": 1} # Test that no warning is raised with default initialization with warnings.catch_warnings(): warnings.simplefilter("error") try: AdaLoraConfig(**kwargs) except Warning: pytest.fail("AdaLoraConfig raised a warning with default initialization.") # Test that a warning is raised when r != 8 in AdaLoraConfig with pytest.warns(UserWarning, match="Note that `r` is not used in AdaLora and will be ignored."): AdaLoraConfig(r=10, total_step=1) def test_adalora_config_correct_timing_still_works(self): pass @pytest.mark.parametrize( "timing_kwargs", [ {"total_step": 100, "tinit": 0, "tfinal": 0}, {"total_step": 100, "tinit": 10, "tfinal": 10}, {"total_step": 100, "tinit": 79, "tfinal": 20}, {"total_step": 100, "tinit": 80, "tfinal": 19}, ], ) def test_adalora_config_valid_timing_works(self, timing_kwargs): # Make sure that passing correct timing values is not prevented by faulty config checks. AdaLoraConfig(**timing_kwargs) # does not raise def test_adalora_config_invalid_total_step_raises(self): with pytest.raises(ValueError) as e: AdaLoraConfig(total_step=None) assert "AdaLoRA does not work when `total_step` is None, supply a value > 0." in str(e) @pytest.mark.parametrize( "timing_kwargs", [ {"total_step": 100, "tinit": 20, "tfinal": 80}, {"total_step": 100, "tinit": 80, "tfinal": 20}, {"total_step": 10, "tinit": 20, "tfinal": 0}, {"total_step": 10, "tinit": 0, "tfinal": 10}, {"total_step": 10, "tinit": 10, "tfinal": 0}, {"total_step": 10, "tinit": 20, "tfinal": 0}, {"total_step": 10, "tinit": 20, "tfinal": 20}, {"total_step": 10, "tinit": 0, "tfinal": 20}, ], ) def test_adalora_config_timing_bounds_error(self, timing_kwargs): # Check if the user supplied timing values that will certainly fail because it breaks # AdaLoRA assumptions. with pytest.raises(ValueError) as e: AdaLoraConfig(**timing_kwargs) assert "The supplied schedule values don't allow for a budgeting phase" in str(e) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_from_pretrained_forward_compatible(self, config_class, mandatory_kwargs, tmp_path, recwarn): """ Make it possible to load configs that contain unknown keys by ignoring them. The idea is to make PEFT configs forward-compatible with future versions of the library. """ config = config_class(**mandatory_kwargs) config.save_pretrained(tmp_path) # add a spurious key to the config with open(tmp_path / "adapter_config.json") as f: config_dict = json.load(f) config_dict["foobar"] = "baz" config_dict["spam"] = 123 with open(tmp_path / "adapter_config.json", "w") as f: json.dump(config_dict, f) msg = f"Unexpected keyword arguments ['foobar', 'spam'] for class {config_class.__name__}, these are ignored." config_from_pretrained = config_class.from_pretrained(tmp_path) assert len(recwarn) == 1 assert recwarn.list[0].message.args[0].startswith(msg) assert "foo" not in config_from_pretrained.to_dict() assert "spam" not in config_from_pretrained.to_dict() assert config.to_dict() == config_from_pretrained.to_dict() assert isinstance(config_from_pretrained, config_class) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_from_pretrained_forward_compatible_load_from_peft_config( self, config_class, mandatory_kwargs, tmp_path, recwarn ): """Exact same test as before, but instead of using LoraConfig.from_pretrained, AdaLoraconfig.from_pretrained, etc. use PeftConfig.from_pretrained. 
This covers a previously existing bug where only the known arguments from PeftConfig would be used instead of the more specific config (which is known thanks to the peft_type attribute). """ config = config_class(**mandatory_kwargs) config.save_pretrained(tmp_path) # add a spurious key to the config with open(tmp_path / "adapter_config.json") as f: config_dict = json.load(f) config_dict["foobar"] = "baz" config_dict["spam"] = 123 with open(tmp_path / "adapter_config.json", "w") as f: json.dump(config_dict, f) msg = f"Unexpected keyword arguments ['foobar', 'spam'] for class {config_class.__name__}, these are ignored." config_from_pretrained = PeftConfig.from_pretrained(tmp_path) # <== use PeftConfig here assert len(recwarn) == 1 assert recwarn.list[0].message.args[0].startswith(msg) assert "foo" not in config_from_pretrained.to_dict() assert "spam" not in config_from_pretrained.to_dict() assert config.to_dict() == config_from_pretrained.to_dict() assert isinstance(config_from_pretrained, config_class) @pytest.mark.parametrize("config_class, mandatory_kwargs", ALL_CONFIG_CLASSES) def test_from_pretrained_sanity_check(self, config_class, mandatory_kwargs, tmp_path): """Following up on the previous test about forward compatibility, we *don't* want any random json to be accepted as a PEFT config. There should be a minimum set of required keys. """ non_peft_json = {"foo": "bar", "baz": 123} with open(tmp_path / "adapter_config.json", "w") as f: json.dump(non_peft_json, f) msg = f"The {config_class.__name__} config that is trying to be loaded is missing required keys: {{'peft_type'}}." with pytest.raises(TypeError, match=msg): config_class.from_pretrained(tmp_path) def test_lora_config_layers_to_transform_validation(self): """Test that specifying layers_pattern without layers_to_transform raises an error""" with pytest.raises( ValueError, match="When `layers_pattern` is specified, `layers_to_transform` must also be specified." ): LoraConfig(r=8, lora_alpha=16, target_modules=["query", "value"], layers_pattern="model.layers") # Test that specifying both layers_to_transform and layers_pattern works fine config = LoraConfig( r=8, lora_alpha=16, target_modules=["query", "value"], layers_to_transform=[0, 1, 2], layers_pattern="model.layers", ) assert config.layers_to_transform == [0, 1, 2] assert config.layers_pattern == "model.layers" # Test that not specifying either works fine config = LoraConfig( r=8, lora_alpha=16, target_modules=["query", "value"], ) assert config.layers_to_transform is None assert config.layers_pattern is None
peft/tests/test_config.py/0
{ "file_path": "peft/tests/test_config.py", "repo_id": "peft", "token_count": 8696 }
253
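For readers who want to reproduce the round-trip these tests parametrize over every config class, a hand-run sketch for `LoraConfig` alone (assuming `peft` is installed) looks like this:

```py
import tempfile
from peft import LoraConfig

config = LoraConfig(target_modules=["q_proj"])
with tempfile.TemporaryDirectory() as tmp_dirname:
    config.save_pretrained(tmp_dirname)  # writes adapter_config.json
    reloaded = LoraConfig.from_pretrained(tmp_dirname)
assert config.to_dict() == reloaded.to_dict()  # same check as test_save_pretrained
```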
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import torch from torch import nn from peft.import_utils import is_bnb_available from peft.optimizers import create_loraplus_optimizer from .testing_utils import require_bitsandbytes, torch_device if is_bnb_available(): import bitsandbytes as bnb class SimpleNet(nn.Module): def __init__(self, bias=True): super().__init__() self.embedding = nn.Embedding(100, 20) self.layer_norm = nn.LayerNorm(20) self.lin0 = nn.Linear(20, 20, bias=bias) self.relu = nn.ReLU() self.lin1 = nn.Linear(20, 16, bias=bias) def forward(self, X): X = self.lin0(self.layer_norm(self.embedding(X))) X = self.relu(X) X = self.lin1(X) return X @require_bitsandbytes def test_lora_plus_helper_success(): model = SimpleNet() optimizer_cls = bnb.optim.Adam8bit lr = 5e-5 optim_config = { "eps": 1e-6, "betas": (0.9, 0.999), "loraplus_weight_decay": 0.0, } loraplus_lr_ratio = 1.2 loraplus_lr_embedding = 1e-6 optim = create_loraplus_optimizer( model=model, optimizer_cls=optimizer_cls, lr=lr, loraplus_lr_ratio=loraplus_lr_ratio, loraplus_lr_embedding=loraplus_lr_embedding, **optim_config, ) assert optim is not None assert len(optim.param_groups) == 4 assert optim.param_groups[0]["lr"] == lr assert optim.param_groups[1]["lr"] == loraplus_lr_embedding assert optim.param_groups[2]["lr"] == optim.param_groups[3]["lr"] == (lr * loraplus_lr_ratio) @require_bitsandbytes def test_lora_plus_optimizer_success(): """ Test if the optimizer is correctly created and the step function runs without any exception """ optimizer_cls = bnb.optim.Adam8bit optim_config = { "eps": 1e-6, "betas": (0.9, 0.999), "loraplus_weight_decay": 0.0, } model: SimpleNet = SimpleNet().to(torch_device) optim = create_loraplus_optimizer( model=model, optimizer_cls=optimizer_cls, lr=5e-5, loraplus_lr_ratio=1.2, loraplus_lr_embedding=1e-6, **optim_config, ) loss = torch.nn.CrossEntropyLoss() bnb.optim.GlobalOptimManager.get_instance().register_parameters(model.parameters()) x = torch.randint(100, (2, 4, 10)).to(torch_device) output = model(x).permute(0, 3, 1, 2) label = torch.randint(16, (2, 4, 10)).to(torch_device) loss_value = loss(output, label) loss_value.backward() optim.step()
peft/tests/test_loraplus.py/0
{ "file_path": "peft/tests/test_loraplus.py", "repo_id": "peft", "token_count": 1328 }
254
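The helper itself is not tied to bitsandbytes; the tests use `bnb.optim.Adam8bit`, but the signature takes a generic optimizer class. A hedged sketch (assuming a stock `torch.optim.AdamW` is accepted, which the generic `optimizer_cls` argument suggests) using the `SimpleNet` defined in this file:

```py
import torch
from peft.optimizers import create_loraplus_optimizer

model = SimpleNet()  # the test model defined above
optim = create_loraplus_optimizer(
    model=model,
    optimizer_cls=torch.optim.AdamW,  # stand-in for bnb.optim.Adam8bit
    lr=5e-5,
    loraplus_lr_ratio=1.2,
    loraplus_lr_embedding=1e-6,
)
print([group["lr"] for group in optim.param_groups])  # embedding and B groups get scaled lrs
```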
# Copyright 2024-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This test file is for tests specific to VeRA, since VeRA has some specific challenges due to the shared weights. import os import pytest import torch from accelerate.utils.imports import is_bf16_available from safetensors import safe_open from torch import nn from peft import PeftModel, VeraConfig, get_peft_model class MLP(nn.Module): def __init__(self, bias=True): super().__init__() self.relu = nn.ReLU() self.lin0 = nn.Linear(10, 20, bias=bias) self.lin1 = nn.Linear(20, 20, bias=bias) # lin1 and lin2 have same shape self.lin2 = nn.Linear(20, 20, bias=bias) self.lin3 = nn.Linear(20, 2, bias=bias) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = self.lin0(X) X = self.relu(X) X = self.lin1(X) X = self.relu(X) X = self.lin2(X) X = self.relu(X) X = self.lin3(X) X = self.sm(X) return X class TestVera: @pytest.fixture def mlp(self): torch.manual_seed(0) model = MLP() return model @pytest.fixture def mlp_same_prng(self, mlp): torch.manual_seed(0) config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False) # creates a default VeRA adapter peft_model = get_peft_model(mlp, config) config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False) peft_model.add_adapter("other", config2) return peft_model def test_multiple_adapters_same_prng_weights(self, mlp_same_prng): # we can have multiple adapters with the same prng key, in which case the weights should be shared assert ( mlp_same_prng.base_model.model.lin1.vera_A["default"] is mlp_same_prng.base_model.model.lin1.vera_A["other"] ) assert ( mlp_same_prng.base_model.model.lin1.vera_B["default"] is mlp_same_prng.base_model.model.lin1.vera_B["other"] ) assert ( mlp_same_prng.base_model.model.lin2.vera_A["default"] is mlp_same_prng.base_model.model.lin2.vera_A["other"] ) assert ( mlp_same_prng.base_model.model.lin2.vera_B["default"] is mlp_same_prng.base_model.model.lin2.vera_B["other"] ) input = torch.randn(5, 10) mlp_same_prng.set_adapter("default") output_default = mlp_same_prng(input) mlp_same_prng.set_adapter("other") output_other = mlp_same_prng(input) assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3) def test_multiple_adapters_different_prng_raises(self): # we cannot have multiple adapters with different prng keys model = MLP() config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False) # creates a default VeRA adapter peft_model = get_peft_model(model, config) config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, projection_prng_key=123) msg = ( r"Vera PRNG initialisation key must be the same for all adapters. 
Got config.projection_prng_key=123 but " r"previous config had 0" ) with pytest.raises(ValueError, match=msg): peft_model.add_adapter("other", config2) def test_multiple_adapters_save_load_save_projection_true(self, mlp_same_prng, tmp_path): # check saving and loading works with multiple adapters and saved projection weights torch.manual_seed(0) input = torch.randn(5, 10) mlp_same_prng.set_adapter("default") output_default = mlp_same_prng(input) mlp_same_prng.set_adapter("other") output_other = mlp_same_prng(input) # sanity check assert not torch.allclose(output_default, output_other, atol=1e-3, rtol=1e-3) save_path = tmp_path / "vera" mlp_same_prng.save_pretrained(save_path) assert os.path.exists(save_path / "adapter_config.json") assert os.path.exists(save_path / "other" / "adapter_config.json") torch.manual_seed(0) mlp = MLP() peft_model = PeftModel.from_pretrained(mlp, save_path) peft_model.load_adapter(save_path / "other", "other") peft_model.set_adapter("default") output_default_loaded = peft_model(input) peft_model.set_adapter("other") output_other_loaded = peft_model(input) assert torch.allclose(output_default, output_default_loaded, atol=1e-3, rtol=1e-3) assert torch.allclose(output_other, output_other_loaded, atol=1e-3, rtol=1e-3) def test_multiple_adapters_save_load_save_projection_false(self, mlp, tmp_path): # check saving and loading works with multiple adapters without saved projection weights torch.manual_seed(1) config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False) # creates a default VeRA adapter peft_model = get_peft_model(mlp, config, adapter_name="first") config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False) peft_model.add_adapter("second", config2) input = torch.randn(5, 10) peft_model.set_adapter("first") output_first = peft_model(input) peft_model.set_adapter("second") output_second = peft_model(input) # sanity check assert not torch.allclose(output_first, output_second, atol=1e-3, rtol=1e-3) save_path = tmp_path / "vera" peft_model.save_pretrained(save_path) assert os.path.exists(save_path / "first" / "adapter_config.json") assert os.path.exists(save_path / "second" / "adapter_config.json") torch.manual_seed(0) mlp = MLP() peft_model = PeftModel.from_pretrained(mlp, save_path / "first", adapter_name="first") peft_model.load_adapter(save_path / "second", "second") peft_model.set_adapter("first") output_first_loaded = peft_model(input) peft_model.set_adapter("second") output_second_loaded = peft_model(input) assert torch.allclose(output_first, output_first_loaded, atol=1e-3, rtol=1e-3) assert torch.allclose(output_second, output_second_loaded, atol=1e-3, rtol=1e-3) def test_multiple_adapters_save_projection_true_contains_vera_A_vera_B(self, mlp_same_prng, tmp_path): # check that the state_dicts do contain the shared projection weights (save_projection=True) save_path = tmp_path / "vera" mlp_same_prng.save_pretrained(save_path) sd_default = {} with safe_open(save_path / "adapter_model.safetensors", framework="pt", device="cpu") as f: for key in f.keys(): sd_default[key] = f.get_tensor(key) assert any("vera_A" in key for key in sd_default) assert any("vera_B" in key for key in sd_default) # default rank for VeRA is 256 assert sd_default["base_model.vera_A"].shape == (256, 20) assert sd_default["base_model.vera_B"].shape == (20, 256) sd_other = {} with safe_open(save_path / "other" / "adapter_model.safetensors", framework="pt", device="cpu") as f: for key in f.keys(): sd_other[key] = f.get_tensor(key) assert
any("vera_A" in key for key in sd_other) assert any("vera_B" in key for key in sd_other) assert sd_other["base_model.vera_A"].shape == (256, 20) assert sd_other["base_model.vera_B"].shape == (20, 256) def test_multiple_adapters_save_projection_false_contains_no_vera_A_vera_B(self, mlp, tmp_path): torch.manual_seed(1) config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False) # creates a default VeRA adapter peft_model = get_peft_model(mlp, config, adapter_name="first") config2 = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False, save_projection=False) peft_model.add_adapter("second", config2) save_path = tmp_path / "vera" peft_model.save_pretrained(save_path) sd_default = {} with safe_open(save_path / "first" / "adapter_model.safetensors", framework="pt", device="cpu") as f: for key in f.keys(): sd_default[key] = f.get_tensor(key) assert not any("vera_A" in key for key in sd_default) assert not any("vera_B" in key for key in sd_default) sd_other = {} with safe_open(save_path / "second" / "adapter_model.safetensors", framework="pt", device="cpu") as f: for key in f.keys(): sd_other[key] = f.get_tensor(key) assert not any("vera_A" in key for key in sd_other) assert not any("vera_B" in key for key in sd_other) def test_vera_A_vera_B_share_memory(self, mlp_same_prng): vera_A = mlp_same_prng.vera_A["default"] vera_B = mlp_same_prng.vera_B["default"] # these tensors should share the same data assert vera_A.data_ptr() == mlp_same_prng.base_model.model.lin1.vera_A["default"].data_ptr() assert vera_B.data_ptr() == mlp_same_prng.base_model.model.lin1.vera_B["default"].data_ptr() assert vera_A.data_ptr() == mlp_same_prng.base_model.model.lin2.vera_A["default"].data_ptr() assert vera_B.data_ptr() == mlp_same_prng.base_model.model.lin2.vera_B["default"].data_ptr() # sanity check: these tensors shouldn't share the same data assert vera_A.data_ptr() != vera_B.data_ptr() def test_vera_lambda_dont_share_memory(self, mlp_same_prng): # sanity check: these tensors shouldn't share the same data assert ( mlp_same_prng.base_model.model.lin1.vera_lambda_b["default"].data_ptr() != mlp_same_prng.base_model.model.lin1.vera_lambda_b["other"].data_ptr() ) assert ( mlp_same_prng.base_model.model.lin1.vera_lambda_b["default"].data_ptr() != mlp_same_prng.base_model.model.lin2.vera_lambda_b["default"].data_ptr() ) assert ( mlp_same_prng.base_model.model.lin1.vera_lambda_b["other"].data_ptr() != mlp_same_prng.base_model.model.lin2.vera_lambda_b["other"].data_ptr() ) assert ( mlp_same_prng.base_model.model.lin1.vera_lambda_d["default"].data_ptr() != mlp_same_prng.base_model.model.lin1.vera_lambda_d["other"].data_ptr() ) assert ( mlp_same_prng.base_model.model.lin1.vera_lambda_d["default"].data_ptr() != mlp_same_prng.base_model.model.lin2.vera_lambda_d["default"].data_ptr() ) assert ( mlp_same_prng.base_model.model.lin1.vera_lambda_d["other"].data_ptr() != mlp_same_prng.base_model.model.lin2.vera_lambda_d["other"].data_ptr() ) def test_vera_different_shapes(self, mlp): config = VeraConfig(target_modules=["lin0", "lin3"], init_weights=False) mlp_different_shapes = get_peft_model(mlp, config) vera_A = mlp_different_shapes.vera_A["default"] vera_B = mlp_different_shapes.vera_B["default"] # sanity check assert mlp.lin0.base_layer.weight.shape != mlp.lin3.base_layer.weight.shape # lin0 has the largest output dimension, lin3 has the largest input dimension # vera_A should have the shape of (rank, largest_in), vera_B should have the shape of (largest_out, rank) assert vera_A.shape 
== (config.r, mlp.lin3.in_features) assert vera_B.shape == (mlp.lin0.out_features, config.r) # should not raise input = torch.randn(5, 10) mlp_different_shapes(input) @pytest.mark.parametrize("dtype", [torch.float32, torch.float16, torch.bfloat16]) def test_vera_dtypes(self, dtype): if dtype == torch.bfloat16: # skip if bf16 is not supported on hardware, see #1872 if not is_bf16_available(): pytest.skip("bfloat16 not supported on this system, skipping the test") model = MLP().to(dtype) config = VeraConfig(target_modules=["lin1", "lin2"], init_weights=False) peft_model = get_peft_model(model, config) inputs = torch.randn(5, 10).to(dtype) output = peft_model(inputs) # should not raise assert output.dtype == dtype
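# Usage sketch (editorial addition, not part of the test suite): a minimal,
# hedged example of the VeRA API the tests above exercise. The checkpoint path
# and adapter names are illustrative assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    config = VeraConfig(target_modules=["lin1", "lin2"])  # rank r defaults to 256
    peft_model = get_peft_model(MLP(), config)  # registers the "default" adapter
    peft_model.add_adapter("other", VeraConfig(target_modules=["lin1", "lin2"]))

    peft_model.set_adapter("other")  # switch which adapter is active
    print(peft_model(torch.randn(5, 10)).shape)  # torch.Size([5, 2])

    peft_model.save_pretrained("vera-checkpoint")  # extra adapters go into subdirectories
    restored = PeftModel.from_pretrained(MLP(), "vera-checkpoint")
    restored.load_adapter("vera-checkpoint/other", "other")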
peft/tests/test_vera.py/0
{ "file_path": "peft/tests/test_vera.py", "repo_id": "peft", "token_count": 5855 }
255
include timm/models/_pruned/*.txt include timm/data/_info/*.txt include timm/data/_info/*.json
pytorch-image-models/MANIFEST.in/0
{ "file_path": "pytorch-image-models/MANIFEST.in", "repo_id": "pytorch-image-models", "token_count": 34 }
256
# Installation

Before you start, you'll need to set up your environment and install the appropriate packages. `timm` is tested on **Python 3+**.

## Virtual Environment

You should install `timm` in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.

1. Create and navigate to your project directory:

    ```bash
    mkdir ~/my-project
    cd ~/my-project
    ```

2. Start a virtual environment inside your directory:

    ```bash
    python -m venv .env
    ```

3. Activate and deactivate the virtual environment with the following commands:

    ```bash
    # Activate the virtual environment
    source .env/bin/activate

    # Deactivate the virtual environment
    source .env/bin/deactivate
    ```

Once you've created your virtual environment, you can install `timm` in it.

## Using pip

The most straightforward way to install `timm` is with pip:

```bash
pip install timm
```

Alternatively, you can install `timm` from GitHub directly to get the latest, bleeding-edge version:

```bash
pip install git+https://github.com/rwightman/pytorch-image-models.git
```

Run the following command to check if `timm` has been properly installed:

```bash
python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
```

This command lists the first five pretrained models available in `timm` (which are sorted alphabetically). You should see the following output:

```python
['adv_inception_v3', 'bat_resnext26ts', 'beit_base_patch16_224', 'beit_base_patch16_224_in22k', 'beit_base_patch16_384']
```

## From Source

Building `timm` from source lets you make changes to the code base. To install from source, clone the repository and install with the following commands:

```bash
git clone https://github.com/rwightman/pytorch-image-models.git
cd pytorch-image-models
pip install -e .
```

Again, you can check if `timm` was properly installed with the following command:

```bash
python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
```
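For one further end-to-end sanity check — a minimal sketch, assuming the pretrained `resnet18` weights download successfully on your machine — you can instantiate a model and run a dummy forward pass:

```py
>>> import timm
>>> import torch

>>> model = timm.create_model('resnet18', pretrained=True)  # downloads weights on first use
>>> model.eval()
>>> with torch.inference_mode():
...     out = model(torch.randn(1, 3, 224, 224))  # dummy batch of one 224x224 RGB image
>>> print(out.shape)
>>> # prints: torch.Size([1, 1000]) for the default ImageNet-1k head
```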
pytorch-image-models/hfdocs/source/installation.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/installation.mdx", "repo_id": "pytorch-image-models", "token_count": 623 }
257
# FBNet

**FBNet** is a type of convolutional neural architecture discovered through [DNAS](https://paperswithcode.com/method/dnas) neural architecture search. It utilises a basic type of image model block inspired by [MobileNetv2](https://paperswithcode.com/method/mobilenetv2) that uses depthwise convolutions and an inverted residual structure (see components).

The principal building block is the [FBNet Block](https://paperswithcode.com/method/fbnet-block).

## How do I use this model on an image?

To load a pretrained model:

```py
>>> import timm
>>> model = timm.create_model('fbnetc_100', pretrained=True)
>>> model.eval()
```

To load and preprocess the image:

```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform

>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)

>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```

To get the model predictions:

```py
>>> import torch
>>> with torch.inference_mode():
...     out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```

To get the top-5 predictions class names:

```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)

>>> with open("imagenet_classes.txt", "r") as f:
...     categories = [s.strip() for s in f.readlines()]

>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
...     print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```

Replace the model name with the variant you want to use, e.g. `fbnetc_100`. You can find the IDs in the model summaries at the top of this page.

To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.

## How do I finetune this model?

You can finetune any of the pre-trained models just by changing the classifier (the last layer).

```py
>>> model = timm.create_model('fbnetc_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```

To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.

## How do I train this model?

You can follow the [timm recipe scripts](../training_script) for training a new model afresh.
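Purely as an illustrative sketch (this is not `timm`'s training script), a minimal fine-tuning loop might look like the following, where `train_loader` and `NUM_FINETUNE_CLASSES` are assumed placeholders for your own data pipeline:

```py
>>> import timm
>>> import torch

>>> model = timm.create_model('fbnetc_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
>>> optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=5e-4)
>>> criterion = torch.nn.CrossEntropyLoss()

>>> model.train()
>>> for images, labels in train_loader:  # train_loader is your own DataLoader
...     optimizer.zero_grad()
...     loss = criterion(model(images), labels)
...     loss.backward()
...     optimizer.step()
```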
## Citation ```BibTeX @misc{wu2019fbnet, title={FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search}, author={Bichen Wu and Xiaoliang Dai and Peizhao Zhang and Yanghan Wang and Fei Sun and Yiming Wu and Yuandong Tian and Peter Vajda and Yangqing Jia and Kurt Keutzer}, year={2019}, eprint={1812.03443}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: FBNet Paper: Title: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search' URL: https://paperswithcode.com/paper/fbnet-hardware-aware-efficient-convnet-design Models: - Name: fbnetc_100 In Collection: FBNet Metadata: FLOPs: 508940064 Parameters: 5570000 File Size: 22525094 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Dropout - FBNet Block - Global Average Pooling - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: fbnetc_100 LR: 0.1 Epochs: 360 Layers: 22 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0005 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L985 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.12% Top 5 Accuracy: 92.37% -->
pytorch-image-models/hfdocs/source/models/fbnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/fbnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1708 }
258
# MnasNet **MnasNet** is a type of convolutional neural network optimized for mobile devices that is discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('mnasnet_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `mnasnet_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('mnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mnasnet, title={MnasNet: Platform-Aware Neural Architecture Search for Mobile}, author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. 
Le}, year={2019}, eprint={1807.11626}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: MNASNet Paper: Title: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile' URL: https://paperswithcode.com/paper/mnasnet-platform-aware-neural-architecture Models: - Name: mnasnet_100 In Collection: MNASNet Metadata: FLOPs: 416415488 Parameters: 4380000 File Size: 17731774 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Global Average Pooling - Inverted Residual Block - Max Pooling - ReLU - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet ID: mnasnet_100 Layers: 100 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4000 Image Size: '224' Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L894 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.67% Top 5 Accuracy: 92.1% - Name: semnasnet_100 In Collection: MNASNet Metadata: FLOPs: 414570766 Parameters: 3890000 File Size: 15731489 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Global Average Pooling - Inverted Residual Block - Max Pooling - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Data: - ImageNet ID: semnasnet_100 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L928 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.45% Top 5 Accuracy: 92.61% -->
pytorch-image-models/hfdocs/source/models/mnasnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/mnasnet.mdx", "repo_id": "pytorch-image-models", "token_count": 2104 }
259
# SelecSLS **SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('selecsls42b', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
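As a brief illustrative sketch of the feature extraction mode referenced above (assuming, as for most `timm` CNNs, that this architecture supports `features_only`):

```py
>>> import timm
>>> import torch

>>> model = timm.create_model('selecsls42b', pretrained=True, features_only=True)
>>> model.eval()
>>> with torch.inference_mode():
...     features = model(torch.randn(1, 3, 224, 224))
>>> for f in features:
...     print(f.shape)  # one feature map per extracted stage
```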
## Citation ```BibTeX @article{Mehta_2020, title={XNect}, volume={39}, ISSN={1557-7368}, url={http://dx.doi.org/10.1145/3386569.3392410}, DOI={10.1145/3386569.3392410}, number={4}, journal={ACM Transactions on Graphics}, publisher={Association for Computing Machinery (ACM)}, author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, year={2020}, month={Jul} } ``` <!-- Type: model-index Collections: - Name: SelecSLS Paper: Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera' URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose Models: - Name: selecsls42b In Collection: SelecSLS Metadata: FLOPs: 3824022528 Parameters: 32460000 File Size: 129948954 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls42b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.18% Top 5 Accuracy: 93.39% - Name: selecsls60 In Collection: SelecSLS Metadata: FLOPs: 4610472600 Parameters: 30670000 File Size: 122839714 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls60 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.99% Top 5 Accuracy: 93.83% - Name: selecsls60b In Collection: SelecSLS Metadata: FLOPs: 4657653144 Parameters: 32770000 File Size: 131252898 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls60b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.41% Top 5 Accuracy: 94.18% -->
pytorch-image-models/hfdocs/source/models/selecsls.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/selecsls.mdx", "repo_id": "pytorch-image-models", "token_count": 2423 }
260
# Xception **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('xception', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.inference_mode(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../training_script) for training a new model afresh. 
## Citation

```BibTeX
@misc{chollet2017xception,
      title={Xception: Deep Learning with Depthwise Separable Convolutions},
      author={François Chollet},
      year={2017},
      eprint={1610.02357},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: Xception
  Paper:
    Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
    URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
Models:
- Name: xception
  In Collection: Xception
  Metadata:
    FLOPs: 10600506792
    Parameters: 22860000
    File Size: 91675053
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception
    Crop Pct: '0.897'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception.py#L229
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.05%
      Top 5 Accuracy: 94.4%
- Name: xception41
  In Collection: Xception
  Metadata:
    FLOPs: 11681983232
    Parameters: 26970000
    File Size: 108422028
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception41
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L181
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 78.54%
      Top 5 Accuracy: 94.28%
- Name: xception65
  In Collection: Xception
  Metadata:
    FLOPs: 17585702144
    Parameters: 39920000
    File Size: 160536780
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception65
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L200
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.55%
      Top 5 Accuracy: 94.66%
- Name: xception71
  In Collection: Xception
  Metadata:
    FLOPs: 22817346560
    Parameters: 42340000
    File Size: 170295556
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception71
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L219
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth
  Results:
  - Task: Image Classification
Dataset: ImageNet Metrics: Top 1 Accuracy: 79.88% Top 5 Accuracy: 94.93% -->
pytorch-image-models/hfdocs/source/models/xception.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/xception.mdx", "repo_id": "pytorch-image-models", "token_count": 2677 }
261
"""NaFlex data loader for dynamic sequence length training. This module provides a specialized data loader for Vision Transformer models that supports: - Dynamic sequence length sampling during training for improved efficiency - Variable patch size training with probabilistic selection - Patch-level random erasing augmentation - Efficient GPU prefetching with normalization Hacked together by / Copyright 2025, Ross Wightman, Hugging Face """ import math from contextlib import suppress from functools import partial from typing import Callable, Dict, Iterator, List, Optional, Tuple, Union import torch from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from .loader import _worker_init, adapt_to_chs from .naflex_dataset import NaFlexMapDatasetWrapper, NaFlexCollator from .naflex_random_erasing import PatchRandomErasing from .transforms_factory import create_transform class NaFlexPrefetchLoader: """Data prefetcher for NaFlex format which normalizes patches.""" def __init__( self, loader: torch.utils.data.DataLoader, mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] = IMAGENET_DEFAULT_STD, channels: int = 3, device: torch.device = torch.device('cuda'), img_dtype: Optional[torch.dtype] = None, re_prob: float = 0., re_mode: str = 'const', re_count: int = 1, re_num_splits: int = 0, ) -> None: """Initialize NaFlexPrefetchLoader. Args: loader: DataLoader to prefetch from. mean: Mean values for normalization. std: Standard deviation values for normalization. channels: Number of image channels. device: Device to move tensors to. img_dtype: Data type for image tensors. re_prob: Random erasing probability. re_mode: Random erasing mode. re_count: Maximum number of erasing rectangles. re_num_splits: Number of augmentation splits. """ self.loader = loader self.device = device self.img_dtype = img_dtype or torch.float32 # Create mean/std tensors for normalization (will be applied to patches) mean = adapt_to_chs(mean, channels) std = adapt_to_chs(std, channels) normalization_shape = (1, 1, channels) self.channels = channels self.mean = torch.tensor( [x * 255 for x in mean], device=device, dtype=self.img_dtype).view(normalization_shape) self.std = torch.tensor( [x * 255 for x in std], device=device, dtype=self.img_dtype).view(normalization_shape) if re_prob > 0.: self.random_erasing = PatchRandomErasing( erase_prob=re_prob, mode=re_mode, max_count=re_count, num_splits=re_num_splits, device=device, ) else: self.random_erasing = None # Check for CUDA/NPU availability self.is_cuda = device.type == 'cuda' and torch.cuda.is_available() self.is_npu = device.type == 'npu' and torch.npu.is_available() def __iter__(self) -> Iterator[Tuple[Dict[str, torch.Tensor], torch.Tensor]]: """Iterate through the loader with prefetching and normalization. Yields: Tuple of (input_dict, targets) with normalized patches. 
""" first = True if self.is_cuda: stream = torch.cuda.Stream(device=self.device) stream_context = partial(torch.cuda.stream, stream=stream) elif self.is_npu: stream = torch.npu.Stream(device=self.device) stream_context = partial(torch.npu.stream, stream=stream) else: stream = None stream_context = suppress for next_input_dict, next_target in self.loader: with stream_context(): # Move all tensors in input_dict to device for k, v in next_input_dict.items(): if isinstance(v, torch.Tensor): dtype = self.img_dtype if k == 'patches' else None next_input_dict[k] = next_input_dict[k].to( device=self.device, non_blocking=True, dtype=dtype, ) next_target = next_target.to(device=self.device, non_blocking=True) # Normalize patch values - handle both [B, N, P*P*C] and [B, N, Ph, Pw, C] formats patches_tensor = next_input_dict['patches'] original_shape = patches_tensor.shape if patches_tensor.ndim == 3: # Format: [B, N, P*P*C] - flattened patches batch_size, num_patches, patch_pixels = original_shape # To [B*N, P*P, C] for normalization and erasing patches = patches_tensor.view(batch_size, num_patches, -1, self.channels) elif patches_tensor.ndim == 5: # Format: [B, N, Ph, Pw, C] - unflattened patches (variable patch size mode) batch_size, num_patches, patch_h, patch_w, channels = original_shape assert channels == self.channels, f"Expected {self.channels} channels, got {channels}" # To [B*N, Ph*Pw, C] for normalization and erasing patches = patches_tensor.view(batch_size, num_patches, -1, self.channels) else: raise ValueError(f"Unexpected patches tensor dimensions: {patches_tensor.ndim}. Expected 3 or 5.") # Apply normalization patches = patches.sub(self.mean).div(self.std) if self.random_erasing is not None: patches = self.random_erasing( patches, patch_coord=next_input_dict['patch_coord'], patch_valid=next_input_dict.get('patch_valid', None), ) # Reshape back to original format next_input_dict['patches'] = patches.view(original_shape) if not first: yield input_dict, target else: first = False if stream is not None: if self.is_cuda: torch.cuda.current_stream(device=self.device).wait_stream(stream) elif self.is_npu: torch.npu.current_stream(device=self.device).wait_stream(stream) input_dict = next_input_dict target = next_target yield input_dict, target def __len__(self) -> int: """Get length of underlying loader. Returns: Number of batches in the loader. """ return len(self.loader) @property def sampler(self): """Get sampler from underlying loader. Returns: Sampler from the underlying DataLoader. """ return self.loader.sampler @property def dataset(self): """Get dataset from underlying loader. Returns: Dataset from the underlying DataLoader. """ return self.loader.dataset def create_naflex_loader( dataset, patch_size: Optional[Union[Tuple[int, int], int]] = None, patch_size_choices: Optional[List[int]] = None, patch_size_choice_probs: Optional[List[float]] = None, train_seq_lens: Tuple[int, ...] 
= (128, 256, 576, 784, 1024), max_seq_len: int = 576, batch_size: int = 32, is_training: bool = False, mixup_fn: Optional[Callable] = None, no_aug: bool = False, re_prob: float = 0., re_mode: str = 'const', re_count: int = 1, re_split: bool = False, train_crop_mode: Optional[str] = None, scale: Optional[Tuple[float, float]] = None, ratio: Optional[Tuple[float, float]] = None, hflip: float = 0.5, vflip: float = 0., color_jitter: float = 0.4, color_jitter_prob: Optional[float] = None, grayscale_prob: float = 0., gaussian_blur_prob: float = 0., auto_augment: Optional[str] = None, num_aug_repeats: int = 0, num_aug_splits: int = 0, interpolation: str = 'bilinear', mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN, std: Tuple[float, ...] = IMAGENET_DEFAULT_STD, crop_pct: Optional[float] = None, crop_mode: Optional[str] = None, crop_border_pixels: Optional[int] = None, num_workers: int = 4, distributed: bool = False, rank: int = 0, world_size: int = 1, seed: int = 42, epoch: int = 0, use_prefetcher: bool = True, pin_memory: bool = True, img_dtype: torch.dtype = torch.float32, device: Union[str, torch.device] = torch.device('cuda'), persistent_workers: bool = True, worker_seeding: str = 'all', ) -> Union[torch.utils.data.DataLoader, NaFlexPrefetchLoader]: """Create a data loader with dynamic sequence length sampling for training. Args: dataset: Dataset to load from. patch_size: Single patch size to use. patch_size_choices: List of patch sizes for variable patch size training. patch_size_choice_probs: Probabilities for each patch size choice. train_seq_lens: Training sequence lengths for dynamic batching. max_seq_len: Fixed sequence length for validation. batch_size: Batch size for validation and max training sequence length. is_training: Whether this is for training (enables dynamic batching). mixup_fn: Optional mixup function. no_aug: Disable augmentation. re_prob: Random erasing probability. re_mode: Random erasing mode. re_count: Maximum number of erasing rectangles. re_split: Random erasing split flag. train_crop_mode: Training crop mode. scale: Scale range for random resize crop. ratio: Aspect ratio range for random resize crop. hflip: Horizontal flip probability. vflip: Vertical flip probability. color_jitter: Color jitter factor. color_jitter_prob: Color jitter probability. grayscale_prob: Grayscale conversion probability. gaussian_blur_prob: Gaussian blur probability. auto_augment: AutoAugment policy. num_aug_repeats: Number of augmentation repeats. num_aug_splits: Number of augmentation splits. interpolation: Interpolation method. mean: Normalization mean values. std: Normalization standard deviation values. crop_pct: Crop percentage for validation. crop_mode: Crop mode. crop_border_pixels: Crop border pixels. num_workers: Number of data loading workers. distributed: Whether using distributed training. rank: Process rank for distributed training. world_size: Total number of processes. seed: Random seed. epoch: Starting epoch. use_prefetcher: Whether to use prefetching. pin_memory: Whether to pin memory. img_dtype: Image data type. device: Device to move tensors to. persistent_workers: Whether to use persistent workers. worker_seeding: Worker seeding mode. Returns: DataLoader or NaFlexPrefetchLoader instance. 
""" if is_training: # For training, use the dynamic sequence length mechanism assert num_aug_repeats == 0, 'Augmentation repeats not currently supported in NaFlex loader' transform_factory = partial( create_transform, is_training=True, no_aug=no_aug, train_crop_mode=train_crop_mode, scale=scale, ratio=ratio, hflip=hflip, vflip=vflip, color_jitter=color_jitter, color_jitter_prob=color_jitter_prob, grayscale_prob=grayscale_prob, gaussian_blur_prob=gaussian_blur_prob, auto_augment=auto_augment, interpolation=interpolation, mean=mean, std=std, crop_pct=crop_pct, crop_mode=crop_mode, crop_border_pixels=crop_border_pixels, re_prob=re_prob, re_mode=re_mode, re_count=re_count, use_prefetcher=use_prefetcher, naflex=True, ) max_train_seq_len = max(train_seq_lens) max_tokens_per_batch = batch_size * max_train_seq_len if isinstance(dataset, torch.utils.data.IterableDataset): assert False, "IterableDataset Wrapper is a WIP" naflex_dataset = NaFlexMapDatasetWrapper( dataset, transform_factory=transform_factory, patch_size=patch_size, patch_size_choices=patch_size_choices, patch_size_choice_probs=patch_size_choice_probs, seq_lens=train_seq_lens, max_tokens_per_batch=max_tokens_per_batch, mixup_fn=mixup_fn, seed=seed, distributed=distributed, rank=rank, world_size=world_size, shuffle=True, epoch=epoch, ) # NOTE: Collation is handled by the dataset wrapper for training loader = torch.utils.data.DataLoader( naflex_dataset, batch_size=None, shuffle=False, num_workers=num_workers, sampler=None, pin_memory=pin_memory, worker_init_fn=partial(_worker_init, worker_seeding=worker_seeding), persistent_workers=persistent_workers ) if use_prefetcher: loader = NaFlexPrefetchLoader( loader, mean=mean, std=std, img_dtype=img_dtype, device=device, re_prob=re_prob, re_mode=re_mode, re_count=re_count, ) else: # For validation, use fixed sequence length (unchanged) dataset.transform = create_transform( is_training=False, interpolation=interpolation, mean=mean, std=std, # FIXME add crop args when sequence transforms support crop modes use_prefetcher=use_prefetcher, naflex=True, patch_size=patch_size, max_seq_len=max_seq_len, patchify=True, ) # Create the collator collate_fn = NaFlexCollator(max_seq_len=max_seq_len) # Handle distributed training sampler = None if distributed and not isinstance(dataset, torch.utils.data.IterableDataset): # For validation, use OrderedDistributedSampler from timm.data.distributed_sampler import OrderedDistributedSampler sampler = OrderedDistributedSampler(dataset) loader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers, sampler=sampler, collate_fn=collate_fn, pin_memory=pin_memory, drop_last=False, ) if use_prefetcher: loader = NaFlexPrefetchLoader( loader, mean=mean, std=std, img_dtype=img_dtype, device=device, ) return loader
pytorch-image-models/timm/data/naflex_loader.py/0
{ "file_path": "pytorch-image-models/timm/data/naflex_loader.py", "repo_id": "pytorch-image-models", "token_count": 7440 }
262
""" Dataset reader for webdataset Hacked together by / Copyright 2022 Ross Wightman """ import io import json import logging import math import os import random import sys from dataclasses import dataclass from functools import partial from itertools import islice from typing import Any, Callable, Dict, List, Optional, Tuple import torch import torch.distributed as dist import yaml from PIL import Image from torch.utils.data import Dataset, IterableDataset, get_worker_info try: import webdataset as wds from webdataset.filters import _shuffle, getfirst from webdataset.shardlists import expand_urls from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample except ImportError: wds = None expand_urls = None from .class_map import load_class_map from .reader import Reader from .shared_count import SharedCount _logger = logging.getLogger(__name__) SAMPLE_SHUFFLE_SIZE = int(os.environ.get('WDS_SHUFFLE_SIZE', 8192)) SAMPLE_INITIAL_SIZE = int(os.environ.get('WDS_INITIAL_SIZE', 2048)) def _load_info(root, names=('_info.json', 'info.json')): if isinstance(names, str): names = (names,) tried = [] err_str = '' for n in names: full_path = os.path.join(root, n) try: tried.append(full_path) with wds.gopen(full_path) as f: if n.endswith('.json'): info_dict = json.load(f) else: info_dict = yaml.safe_load(f) return info_dict except Exception as e: err_str = str(e) _logger.warning( f'Dataset info file not found at {tried}. Error: {err_str}. ' 'Falling back to provided split and size arg.') return {} @dataclass class SplitInfo: num_samples: int filenames: Tuple[str] shard_lengths: Tuple[int] = () alt_label: str = '' name: str = '' def _parse_split_info(split: str, info: Dict): def _info_convert(dict_info): return SplitInfo( num_samples=dict_info['num_samples'], filenames=tuple(dict_info['filenames']), shard_lengths=tuple(dict_info['shard_lengths']), alt_label=dict_info.get('alt_label', ''), name=dict_info['name'], ) if 'tar' in split or '..' in split: # split in WDS string braceexpand format, sample count can be included with a | separator # ex: `dataset-split-{0000..9999}.tar|100000` for 9999 shards, covering 100,000 samples split = split.split('|') num_samples = 0 split_name = '' if len(split) > 1: num_samples = int(split[1]) split = split[0] if '::' not in split: split_parts = split.split('-', 3) split_idx = len(split_parts) - 1 if split_idx and 'splits' in info and split_parts[split_idx] in info['splits']: split_name = split_parts[split_idx] split_filenames = expand_urls(split) if split_name: split_info = info['splits'][split_name] if not num_samples: _fc = {f: c for f, c in zip(split_info['filenames'], split_info['shard_lengths'])} num_samples = sum(_fc[f] for f in split_filenames) split_info['filenames'] = tuple(_fc.keys()) split_info['shard_lengths'] = tuple(_fc.values()) split_info['num_samples'] = num_samples split_info = _info_convert(split_info) else: split_info = SplitInfo( name=split_name, num_samples=num_samples, filenames=split_filenames, ) else: if 'splits' not in info or split not in info['splits']: raise RuntimeError(f"split {split} not found in info ({info.get('splits', {}).keys()})") split = split split_info = info['splits'][split] split_info = _info_convert(split_info) return split_info def log_and_continue(exn): """Call in an exception handler to ignore exceptions, issue a warning, and continue.""" _logger.warning(f'Handling webdataset error ({repr(exn)}). 
Ignoring.') # NOTE: try force an exit on errors that are clearly code / config and not transient if isinstance(exn, TypeError): raise exn return True def _decode( sample, image_key='jpg', image_mode='RGB', target_key='cls', alt_label='' ): """ Custom sample decode * decode and convert PIL Image * cls byte string label to int * pass through JSON byte string (if it exists) without parse """ # decode class label, skip if alternate label not valid if alt_label: # alternative labels are encoded in json metadata meta = json.loads(sample['json']) class_label = int(meta[alt_label]) if class_label < 0: # skipped labels currently encoded as -1, may change to a null/None value return None else: class_label = int(sample[target_key]) # decode image img = getfirst(sample, image_key) with io.BytesIO(img) as b: img = Image.open(b) img.load() if image_mode: img = img.convert(image_mode) # json passed through in undecoded state decoded = dict(jpg=img, cls=class_label, json=sample.get('json', None)) return decoded def pytorch_worker_seed(): """get dataloader worker seed from pytorch""" worker_info = get_worker_info() if worker_info is not None: # favour the seed already created for pytorch dataloader workers if it exists return worker_info.seed # fallback to wds rank based seed return wds.utils.pytorch_worker_seed() if wds is not None: # conditional to avoid mandatory wds import (via inheritance of wds.PipelineStage) class detshuffle2(wds.PipelineStage): def __init__( self, bufsize=1000, initial=100, seed=0, epoch=-1, ): self.bufsize = bufsize self.initial = initial self.seed = seed self.epoch = epoch def run(self, src): if isinstance(self.epoch, SharedCount): epoch = self.epoch.value else: # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train) # situation as different workers may wrap at different times (or not at all). self.epoch += 1 epoch = self.epoch if self.seed < 0: seed = pytorch_worker_seed() + epoch else: seed = self.seed + epoch # _logger.info(f'shuffle seed: {self.seed}, {seed}, epoch: {epoch}') # FIXME temporary rng = random.Random(seed) return _shuffle(src, self.bufsize, self.initial, rng) else: detshuffle2 = None class ResampledShards2(IterableDataset): """An iterable dataset yielding a list of urls.""" def __init__( self, urls, nshards=sys.maxsize, worker_seed=None, deterministic=True, epoch=-1, ): """Sample shards from the shard list with replacement. :param urls: a list of URLs as a Python list or brace notation string """ super().__init__() urls = wds.shardlists.expand_urls(urls) self.urls = urls assert isinstance(self.urls[0], str) self.nshards = nshards self.rng = random.Random() self.worker_seed = pytorch_worker_seed if worker_seed is None else worker_seed self.deterministic = deterministic self.epoch = epoch def __iter__(self): """Return an iterator over the shards.""" if isinstance(self.epoch, SharedCount): epoch = self.epoch.value else: # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train) # situation as different workers may wrap at different times (or not at all). 
self.epoch += 1 epoch = self.epoch if self.deterministic: # reset seed w/ epoch if deterministic, worker seed should be deterministic due to arg.seed self.rng = random.Random(self.worker_seed() + epoch) for _ in range(self.nshards): index = self.rng.randint(0, len(self.urls) - 1) yield dict(url=self.urls[index]) class ReaderWds(Reader): def __init__( self, root: str, name: Optional[str] = None, split: str = 'train', is_training: bool = False, num_samples: Optional[int] = None, batch_size: int = 1, repeats: int = 0, seed: int = 42, class_map: Optional[dict] = None, input_key: str = 'jpg;png;webp', input_img_mode: str = 'RGB', target_key: str = 'cls', target_img_mode: str = '', filename_key: str = 'filename', sample_shuffle_size: Optional[int] = None, sample_initial_size: Optional[int] = None, ): super().__init__() if wds is None: raise RuntimeError( 'Please install webdataset 0.2.x package `pip install git+https://github.com/webdataset/webdataset`.') self.root = root self.is_training = is_training self.batch_size = batch_size self.repeats = repeats self.common_seed = seed # a seed that's fixed across all worker / distributed instances self.shard_shuffle_size = 500 self.sample_shuffle_size = sample_shuffle_size or SAMPLE_SHUFFLE_SIZE self.sample_initial_size = sample_initial_size or SAMPLE_INITIAL_SIZE self.input_key = input_key self.input_img_mode = input_img_mode self.target_key = target_key self.filename_key = filename_key self.key_ext = '.JPEG' # extension to add to key for original filenames (DS specific, default ImageNet) self.info = _load_info(self.root) self.split_info = _parse_split_info(split, self.info) if num_samples is not None: self.num_samples = num_samples else: self.num_samples = self.split_info.num_samples if is_training and not self.num_samples: raise RuntimeError(f'Invalid split definition, num_samples not specified in train mode.') self.remap_class = False if class_map: self.class_to_idx = load_class_map(class_map) self.remap_class = True else: self.class_to_idx = {} # Distributed world state self.dist_rank = 0 self.dist_num_replicas = 1 if dist.is_available() and dist.is_initialized() and dist.get_world_size() > 1: self.dist_rank = dist.get_rank() self.dist_num_replicas = dist.get_world_size() # Attributes that are updated in _lazy_init self.worker_info = None self.worker_id = 0 self.worker_seed = seed # seed unique to each worker instance self.num_workers = 1 self.global_worker_id = 0 self.global_num_workers = 1 self.init_count = 0 self.epoch_count = SharedCount() # DataPipeline is lazy init, the majority of WDS DataPipeline could be init here, BUT, shuffle seed # is not handled in manner where it can be deterministic for each worker AND initialized up front self.ds = None def set_epoch(self, count): self.epoch_count.value = count def set_loader_cfg( self, num_workers: Optional[int] = None, ): if self.ds is not None: return if num_workers is not None: self.num_workers = num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers def _lazy_init(self): """ Lazily initialize worker (in worker processes) """ if self.worker_info is None: worker_info = torch.utils.data.get_worker_info() if worker_info is not None: self.worker_info = worker_info self.worker_id = worker_info.id self.worker_seed = worker_info.seed self.num_workers = worker_info.num_workers self.global_num_workers = self.dist_num_replicas * self.num_workers self.global_worker_id = self.dist_rank * self.num_workers + self.worker_id # init data pipeline abs_shard_filenames = 
[os.path.join(self.root, f) for f in self.split_info.filenames] pipeline = [wds.SimpleShardList(abs_shard_filenames)] # at this point we have an iterator over all the shards if self.is_training: pipeline.extend([ detshuffle2( self.shard_shuffle_size, seed=self.common_seed, epoch=self.epoch_count, ), self._split_by_node_and_worker, # at this point, we have an iterator over the shards assigned to each worker wds.tarfile_to_samples(handler=log_and_continue), wds.shuffle( bufsize=self.sample_shuffle_size, initial=self.sample_initial_size, rng=random.Random(self.worker_seed) # this is why we lazy-init whole DataPipeline ), ]) else: pipeline.extend([ self._split_by_node_and_worker, # at this point, we have an iterator over the shards assigned to each worker wds.tarfile_to_samples(handler=log_and_continue), ]) pipeline.extend([ wds.map( partial( _decode, image_key=self.input_key, image_mode=self.input_img_mode, alt_label=self.split_info.alt_label, ), handler=log_and_continue, ), wds.rename(image=self.input_key, target=self.target_key) ]) self.ds = wds.DataPipeline(*pipeline) def _split_by_node_and_worker(self, src): if self.global_num_workers > 1: for s in islice(src, self.global_worker_id, None, self.global_num_workers): yield s else: for s in src: yield s def _num_samples_per_worker(self): num_worker_samples = self.num_samples / max(self.global_num_workers, self.dist_num_replicas) if self.is_training or self.dist_num_replicas > 1: num_worker_samples = math.ceil(num_worker_samples) if self.is_training: num_worker_samples = math.ceil(num_worker_samples / self.batch_size) * self.batch_size return int(num_worker_samples) def __iter__(self): if self.ds is None: self._lazy_init() num_worker_samples = self._num_samples_per_worker() if self.is_training or self.dist_num_replicas > 1: # NOTE: doing distributed validation w/ WDS is messy, hard to meet constraints that # same # of batches needed across all replicas w/ seeing each sample once. # with_epoch() is simple but could miss a shard's worth of samples in some workers, # and duplicate in others. Best to keep num DL workers low and a divisor of #val shards. ds = self.ds.with_epoch(num_worker_samples) else: ds = self.ds i = 0 # _logger.info(f'start {i}, {self.worker_id}') # FIXME temporary debug for sample in ds: target = sample['target'] if self.remap_class: target = self.class_to_idx[target] yield sample['image'], target i += 1 # _logger.info(f'end {i}, {self.worker_id}') # FIXME temporary debug def __len__(self): num_samples = self._num_samples_per_worker() * self.num_workers return num_samples def _filename(self, index, basename=False, absolute=False): assert False, "Not supported" # no random access to examples def filenames(self, basename=False, absolute=False): """ Return all filenames in dataset, overrides base""" if self.ds is None: self._lazy_init() names = [] for sample in self.ds: if self.filename_key in sample: name = sample[self.filename_key] elif '__key__' in sample: name = sample['__key__'] + self.key_ext else: assert False, "No supported name field present" names.append(name) if len(names) >= self.num_samples: break # safety for ds.repeat() case return names
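# Usage sketch (editorial addition): the shard pattern, sample count, and root
# below are invented for illustration of the `split` string format parsed by
# _parse_split_info (brace-expanded shard list, optional '|<num_samples>').
if __name__ == "__main__":
    reader = ReaderWds(
        root='/data/imagenet-wds',
        split='imagenet-train-{000000..000146}.tar|1281167',
        is_training=True,
        batch_size=256,
        seed=42,
    )
    for img, target in reader:  # yields (decoded PIL image, int label) pairs
        print(img.size, target)
        break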
pytorch-image-models/timm/data/readers/reader_wds.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/reader_wds.py", "repo_id": "pytorch-image-models", "token_count": 7881 }
263
""" Bottleneck Self Attention (Bottleneck Transformers) Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 @misc{2101.11605, Author = {Aravind Srinivas and Tsung-Yi Lin and Niki Parmar and Jonathon Shlens and Pieter Abbeel and Ashish Vaswani}, Title = {Bottleneck Transformers for Visual Recognition}, Year = {2021}, } Based on ref gist at: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 This impl is a WIP but given that it is based on the ref gist likely not too far off. Hacked together by / Copyright 2021 Ross Wightman """ from typing import List import torch import torch.nn as nn import torch.nn.functional as F from .helpers import to_2tuple, make_divisible from .weight_init import trunc_normal_ from .trace_utils import _assert def rel_logits_1d(q, rel_k, permute_mask: List[int]): """ Compute relative logits along one dimension As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 Args: q: (batch, heads, height, width, dim) rel_k: (2 * width - 1, dim) permute_mask: permute output dim according to this """ B, H, W, dim = q.shape x = (q @ rel_k.transpose(-1, -2)) x = x.reshape(-1, W, 2 * W -1) # pad to shift from relative to absolute indexing x_pad = F.pad(x, [0, 1]).flatten(1) x_pad = F.pad(x_pad, [0, W - 1]) # reshape and slice out the padded elements x_pad = x_pad.reshape(-1, W + 1, 2 * W - 1) x = x_pad[:, :W, W - 1:] # reshape and tile x = x.reshape(B, H, 1, W, W).expand(-1, -1, H, -1, -1) return x.permute(permute_mask) class PosEmbedRel(nn.Module): """ Relative Position Embedding As per: https://gist.github.com/aravindsrinivas/56359b79f0ce4449bcb04ab4b56a57a2 Originally from: `Attention Augmented Convolutional Networks` - https://arxiv.org/abs/1904.09925 """ def __init__(self, feat_size, dim_head, scale): super().__init__() self.height, self.width = to_2tuple(feat_size) self.dim_head = dim_head self.height_rel = nn.Parameter(torch.randn(self.height * 2 - 1, dim_head) * scale) self.width_rel = nn.Parameter(torch.randn(self.width * 2 - 1, dim_head) * scale) def forward(self, q): B, HW, _ = q.shape # relative logits in width dimension. q = q.reshape(B, self.height, self.width, -1) rel_logits_w = rel_logits_1d(q, self.width_rel, permute_mask=(0, 1, 3, 2, 4)) # relative logits in height dimension. q = q.transpose(1, 2) rel_logits_h = rel_logits_1d(q, self.height_rel, permute_mask=(0, 3, 1, 4, 2)) rel_logits = rel_logits_h + rel_logits_w rel_logits = rel_logits.reshape(B, HW, HW) return rel_logits class BottleneckAttn(nn.Module): """ Bottleneck Attention Paper: `Bottleneck Transformers for Visual Recognition` - https://arxiv.org/abs/2101.11605 The internal dimensions of the attention module are controlled by the interaction of several arguments. 
* the output dimension of the module is specified by dim_out, which falls back to input dim if not set * the value (v) dimension is set to dim_out // num_heads, the v projection determines the output dim * the query and key (qk) dimensions are determined by * num_heads * dim_head if dim_head is not None * num_heads * (dim_out * attn_ratio // num_heads) if dim_head is None * as seen above, attn_ratio determines the ratio of q and k relative to the output if dim_head not used Args: dim (int): input dimension to the module dim_out (int): output dimension of the module, same as dim if not set stride (int): output stride of the module, avg pool used if stride == 2 (default: 1). num_heads (int): parallel attention heads (default: 4) dim_head (int): dimension of query and key heads, calculated from dim_out * attn_ratio // num_heads if not set qk_ratio (float): ratio of q and k dimensions to output dimension when dim_head not set. (default: 1.0) qkv_bias (bool): add bias to q, k, and v projections scale_pos_embed (bool): scale the position embedding as well as Q @ K """ def __init__( self, dim, dim_out=None, feat_size=None, stride=1, num_heads=4, dim_head=None, qk_ratio=1.0, qkv_bias=False, scale_pos_embed=False): super().__init__() assert feat_size is not None, 'A concrete feature size matching expected input (H, W) is required' dim_out = dim_out or dim assert dim_out % num_heads == 0 self.num_heads = num_heads self.dim_head_qk = dim_head or make_divisible(dim_out * qk_ratio, divisor=8) // num_heads self.dim_head_v = dim_out // self.num_heads self.dim_out_qk = num_heads * self.dim_head_qk self.dim_out_v = num_heads * self.dim_head_v self.scale = self.dim_head_qk ** -0.5 self.scale_pos_embed = scale_pos_embed self.qkv = nn.Conv2d(dim, self.dim_out_qk * 2 + self.dim_out_v, 1, bias=qkv_bias) # NOTE I'm only supporting relative pos embedding for now self.pos_embed = PosEmbedRel(feat_size, dim_head=self.dim_head_qk, scale=self.scale) self.pool = nn.AvgPool2d(2, 2) if stride == 2 else nn.Identity() self.reset_parameters() def reset_parameters(self): trunc_normal_(self.qkv.weight, std=self.qkv.weight.shape[1] ** -0.5) # fan-in trunc_normal_(self.pos_embed.height_rel, std=self.scale) trunc_normal_(self.pos_embed.width_rel, std=self.scale) def forward(self, x): B, C, H, W = x.shape _assert(H == self.pos_embed.height, '') _assert(W == self.pos_embed.width, '') x = self.qkv(x) # B, (2 * dim_head_qk + dim_head_v) * num_heads, H, W # NOTE head vs channel split ordering in qkv projection was decided before I allowed qk to differ from v # So, this is more verbose than if heads were before qkv splits, but throughput is not impacted. q, k, v = torch.split(x, [self.dim_out_qk, self.dim_out_qk, self.dim_out_v], dim=1) q = q.reshape(B * self.num_heads, self.dim_head_qk, -1).transpose(-1, -2) k = k.reshape(B * self.num_heads, self.dim_head_qk, -1) # no transpose, for q @ k v = v.reshape(B * self.num_heads, self.dim_head_v, -1).transpose(-1, -2) if self.scale_pos_embed: attn = (q @ k + self.pos_embed(q)) * self.scale # B * num_heads, H * W, H * W else: attn = (q @ k) * self.scale + self.pos_embed(q) attn = attn.softmax(dim=-1) out = (attn @ v).transpose(-1, -2).reshape(B, self.dim_out_v, H, W) # B, dim_out, H, W out = self.pool(out) return out
pytorch-image-models/timm/layers/bottleneck_attn.py/0
{ "file_path": "pytorch-image-models/timm/layers/bottleneck_attn.py", "repo_id": "pytorch-image-models", "token_count": 2907 }
264
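# Usage sketch (assumes a recent timm install that exports this layer from
# timm.layers): BottleneckAttn requires a concrete feat_size because its relative
# position tables are parameterized with (2*H - 1) and (2*W - 1) entries, tying
# the module to a single input resolution.
import torch
from timm.layers import BottleneckAttn

attn = BottleneckAttn(dim=256, dim_out=256, feat_size=(8, 8), num_heads=4)
x = torch.randn(2, 256, 8, 8)
print(attn(x).shape)  # torch.Size([2, 256, 8, 8]); stride=2 would halve H, W via avg pool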
""" Filter Response Norm in PyTorch Based on `Filter Response Normalization Layer` - https://arxiv.org/abs/1911.09737 Hacked together by / Copyright 2021 Ross Wightman """ import torch import torch.nn as nn from .create_act import create_act_layer from .trace_utils import _assert def inv_instance_rms(x, eps: float = 1e-5): rms = x.square().float().mean(dim=(2, 3), keepdim=True).add(eps).rsqrt().to(x.dtype) return rms.expand(x.shape) class FilterResponseNormTlu2d(nn.Module): def __init__(self, num_features, apply_act=True, eps=1e-5, rms=True, **_): super(FilterResponseNormTlu2d, self).__init__() self.apply_act = apply_act # apply activation (non-linearity) self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.tau = nn.Parameter(torch.zeros(num_features)) if apply_act else None self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) if self.tau is not None: nn.init.zeros_(self.tau) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return torch.maximum(x, self.tau.reshape(v_shape).to(dtype=x_dtype)) if self.tau is not None else x class FilterResponseNormAct2d(nn.Module): def __init__(self, num_features, apply_act=True, act_layer=nn.ReLU, inplace=None, rms=True, eps=1e-5, **_): super(FilterResponseNormAct2d, self).__init__() if act_layer is not None and apply_act: self.act = create_act_layer(act_layer, inplace=inplace) else: self.act = nn.Identity() self.rms = rms self.eps = eps self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.ones_(self.weight) nn.init.zeros_(self.bias) def forward(self, x): _assert(x.dim() == 4, 'expected 4D input') x_dtype = x.dtype v_shape = (1, -1, 1, 1) x = x * inv_instance_rms(x, self.eps) x = x * self.weight.view(v_shape).to(dtype=x_dtype) + self.bias.view(v_shape).to(dtype=x_dtype) return self.act(x)
pytorch-image-models/timm/layers/filter_response_norm.py/0
{ "file_path": "pytorch-image-models/timm/layers/filter_response_norm.py", "repo_id": "pytorch-image-models", "token_count": 1182 }
265
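# Usage sketch (assumes a recent timm install that exports this layer from
# timm.layers): FRN + TLU replaces a BatchNorm2d + ReLU pair without batch
# statistics; each (sample, channel) plane is scaled by the reciprocal of its own
# RMS. At init (weight=1, bias=0, tau=0) the output reduces to
# max(x * rsqrt(mean(x^2) + eps), 0), which the assert below checks.
import torch
from timm.layers import FilterResponseNormTlu2d

frn = FilterResponseNormTlu2d(num_features=64)
x = torch.randn(2, 64, 16, 16)
y = frn(x)
rms = x.square().mean(dim=(2, 3), keepdim=True).add(1e-5).rsqrt()
torch.testing.assert_close(y, torch.maximum(x * rms, torch.zeros(1)))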
from typing import Optional import torch from torch import nn from torch import nn, Tensor from torch.nn.modules.transformer import _get_activation_fn def add_ml_decoder_head(model): if hasattr(model, 'global_pool') and hasattr(model, 'fc'): # most CNN models, like Resnet50 model.global_pool = nn.Identity() del model.fc num_classes = model.num_classes num_features = model.num_features model.fc = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif hasattr(model, 'global_pool') and hasattr(model, 'classifier'): # EfficientNet model.global_pool = nn.Identity() del model.classifier num_classes = model.num_classes num_features = model.num_features model.classifier = MLDecoder(num_classes=num_classes, initial_num_features=num_features) elif 'RegNet' in model._get_name() or 'TResNet' in model._get_name(): # hasattr(model, 'head') del model.head num_classes = model.num_classes num_features = model.num_features model.head = MLDecoder(num_classes=num_classes, initial_num_features=num_features) else: print("Model code-writing is not aligned currently with ml-decoder") exit(-1) if hasattr(model, 'drop_rate'): # Ml-Decoder has inner dropout model.drop_rate = 0 return model class TransformerDecoderLayerOptimal(nn.Module): def __init__(self, d_model, nhead=8, dim_feedforward=2048, dropout=0.1, activation="relu", layer_norm_eps=1e-5) -> None: super(TransformerDecoderLayerOptimal, self).__init__() self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.dropout = nn.Dropout(dropout) self.dropout1 = nn.Dropout(dropout) self.dropout2 = nn.Dropout(dropout) self.dropout3 = nn.Dropout(dropout) self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) # Implementation of Feedforward model self.linear1 = nn.Linear(d_model, dim_feedforward) self.linear2 = nn.Linear(dim_feedforward, d_model) self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps) self.activation = _get_activation_fn(activation) def __setstate__(self, state): if 'activation' not in state: state['activation'] = torch.nn.functional.relu super(TransformerDecoderLayerOptimal, self).__setstate__(state) def forward(self, tgt: Tensor, memory: Tensor, tgt_mask: Optional[Tensor] = None, memory_mask: Optional[Tensor] = None, tgt_key_padding_mask: Optional[Tensor] = None, memory_key_padding_mask: Optional[Tensor] = None) -> Tensor: tgt = tgt + self.dropout1(tgt) tgt = self.norm1(tgt) tgt2 = self.multihead_attn(tgt, memory, memory)[0] tgt = tgt + self.dropout2(tgt2) tgt = self.norm2(tgt) tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) tgt = tgt + self.dropout3(tgt2) tgt = self.norm3(tgt) return tgt # class ExtrapClasses(object): # def __init__(self, num_queries: int, group_size: int): # self.num_queries = num_queries # self.group_size = group_size # # def __call__(self, h: torch.Tensor, class_embed_w: torch.Tensor, class_embed_b: torch.Tensor, out_extrap: # torch.Tensor): # # h = h.unsqueeze(-1).expand(-1, -1, -1, self.group_size) # h = h[..., None].repeat(1, 1, 1, self.group_size) # torch.Size([bs, 5, 768, groups]) # w = class_embed_w.view((self.num_queries, h.shape[2], self.group_size)) # out = (h * w).sum(dim=2) + class_embed_b # out = out.view((h.shape[0], self.group_size * self.num_queries)) # return out class MLDecoder(nn.Module): def __init__(self, num_classes, num_of_groups=-1, decoder_embedding=768, initial_num_features=2048): super(MLDecoder, self).__init__() embed_len_decoder = 100 if num_of_groups < 0 else num_of_groups if 
embed_len_decoder > num_classes: embed_len_decoder = num_classes self.embed_len_decoder = embed_len_decoder # switching to 768 initial embeddings decoder_embedding = 768 if decoder_embedding < 0 else decoder_embedding self.embed_standart = nn.Linear(initial_num_features, decoder_embedding) # decoder decoder_dropout = 0.1 num_layers_decoder = 1 dim_feedforward = 2048 layer_decode = TransformerDecoderLayerOptimal(d_model=decoder_embedding, dim_feedforward=dim_feedforward, dropout=decoder_dropout) self.decoder = nn.TransformerDecoder(layer_decode, num_layers=num_layers_decoder) # non-learnable queries self.query_embed = nn.Embedding(embed_len_decoder, decoder_embedding) self.query_embed.requires_grad_(False) # group fully-connected self.num_classes = num_classes self.duplicate_factor = int(num_classes / embed_len_decoder + 0.999) self.duplicate_pooling = torch.nn.Parameter( torch.Tensor(embed_len_decoder, decoder_embedding, self.duplicate_factor)) self.duplicate_pooling_bias = torch.nn.Parameter(torch.Tensor(num_classes)) torch.nn.init.xavier_normal_(self.duplicate_pooling) torch.nn.init.constant_(self.duplicate_pooling_bias, 0) def forward(self, x): if len(x.shape) == 4: # [bs,2048, 7,7] embedding_spatial = x.flatten(2).transpose(1, 2) else: # [bs, 197,468] embedding_spatial = x embedding_spatial_786 = self.embed_standart(embedding_spatial) embedding_spatial_786 = torch.nn.functional.relu(embedding_spatial_786, inplace=True) bs = embedding_spatial_786.shape[0] query_embed = self.query_embed.weight # tgt = query_embed.unsqueeze(1).repeat(1, bs, 1) tgt = query_embed.unsqueeze(1).expand(-1, bs, -1) # no allocation of memory with expand h = self.decoder(tgt, embedding_spatial_786.transpose(0, 1)) # [embed_len_decoder, batch, 768] h = h.transpose(0, 1) out_extrap = torch.zeros(h.shape[0], h.shape[1], self.duplicate_factor, device=h.device, dtype=h.dtype) for i in range(self.embed_len_decoder): # group FC h_i = h[:, i, :] w_i = self.duplicate_pooling[i, :, :] out_extrap[:, i, :] = torch.matmul(h_i, w_i) h_out = out_extrap.flatten(1)[:, :self.num_classes] h_out += self.duplicate_pooling_bias logits = h_out return logits
pytorch-image-models/timm/layers/ml_decoder.py/0
{ "file_path": "pytorch-image-models/timm/layers/ml_decoder.py", "repo_id": "pytorch-image-models", "token_count": 3048 }
266
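# Usage sketch (import path taken from the file header above; assumes timm is
# installed): attach ML-Decoder to a ResNet50. Because global_pool is replaced
# with Identity, the decoder receives the full [B, 2048, 7, 7] feature map and
# its non-learnable queries attend over the 49 spatial positions.
import torch
import timm
from timm.layers.ml_decoder import add_ml_decoder_head

model = timm.create_model('resnet50', num_classes=80)  # e.g. 80 multi-label classes
model = add_ml_decoder_head(model)
logits = model(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 80])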
""" Split Attention Conv2d (for ResNeSt Models) Paper: `ResNeSt: Split-Attention Networks` - /https://arxiv.org/abs/2004.08955 Adapted from original PyTorch impl at https://github.com/zhanghang1989/ResNeSt Modified for torchscript compat, performance, and consistency with timm by Ross Wightman """ import torch import torch.nn.functional as F from torch import nn from .helpers import make_divisible class RadixSoftmax(nn.Module): def __init__(self, radix, cardinality): super(RadixSoftmax, self).__init__() self.radix = radix self.cardinality = cardinality def forward(self, x): batch = x.size(0) if self.radix > 1: x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2) x = F.softmax(x, dim=1) x = x.reshape(batch, -1) else: x = torch.sigmoid(x) return x class SplitAttn(nn.Module): """Split-Attention (aka Splat) """ def __init__(self, in_channels, out_channels=None, kernel_size=3, stride=1, padding=None, dilation=1, groups=1, bias=False, radix=2, rd_ratio=0.25, rd_channels=None, rd_divisor=8, act_layer=nn.ReLU, norm_layer=None, drop_layer=None, **kwargs): super(SplitAttn, self).__init__() out_channels = out_channels or in_channels self.radix = radix mid_chs = out_channels * radix if rd_channels is None: attn_chs = make_divisible(in_channels * radix * rd_ratio, min_value=32, divisor=rd_divisor) else: attn_chs = rd_channels * radix padding = kernel_size // 2 if padding is None else padding self.conv = nn.Conv2d( in_channels, mid_chs, kernel_size, stride, padding, dilation, groups=groups * radix, bias=bias, **kwargs) self.bn0 = norm_layer(mid_chs) if norm_layer else nn.Identity() self.drop = drop_layer() if drop_layer is not None else nn.Identity() self.act0 = act_layer(inplace=True) self.fc1 = nn.Conv2d(out_channels, attn_chs, 1, groups=groups) self.bn1 = norm_layer(attn_chs) if norm_layer else nn.Identity() self.act1 = act_layer(inplace=True) self.fc2 = nn.Conv2d(attn_chs, mid_chs, 1, groups=groups) self.rsoftmax = RadixSoftmax(radix, groups) def forward(self, x): x = self.conv(x) x = self.bn0(x) x = self.drop(x) x = self.act0(x) B, RC, H, W = x.shape if self.radix > 1: x = x.reshape((B, self.radix, RC // self.radix, H, W)) x_gap = x.sum(dim=1) else: x_gap = x x_gap = x_gap.mean((2, 3), keepdim=True) x_gap = self.fc1(x_gap) x_gap = self.bn1(x_gap) x_gap = self.act1(x_gap) x_attn = self.fc2(x_gap) x_attn = self.rsoftmax(x_attn).view(B, -1, 1, 1) if self.radix > 1: out = (x * x_attn.reshape((B, self.radix, RC // self.radix, 1, 1))).sum(dim=1) else: out = x * x_attn return out.contiguous()
pytorch-image-models/timm/layers/split_attn.py/0
{ "file_path": "pytorch-image-models/timm/layers/split_attn.py", "repo_id": "pytorch-image-models", "token_count": 1533 }
267
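# Usage sketch (assumes a recent timm install that exports this layer from
# timm.layers): with radix=2 the grouped conv emits 2x out_channels, the result
# is split into two radix branches, and RadixSoftmax produces per-channel weights
# over the radix axis before the branches are summed back to out_channels.
import torch
from timm.layers import SplitAttn

splat = SplitAttn(in_channels=64, out_channels=64, kernel_size=3, radix=2)
x = torch.randn(2, 64, 32, 32)
print(splat(x).shape)  # torch.Size([2, 64, 32, 32]) -- radix folds back into channels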
""" EfficientNet, MobileNetV3, etc Builder Assembles EfficieNet and related network feature blocks from string definitions. Handles stride, dilation calculations, and selects feature extraction points. Hacked together by / Copyright 2019, Ross Wightman """ from typing import Callable, Optional import logging import math import re from copy import deepcopy from functools import partial from typing import Any, Dict, List import torch.nn as nn from timm.layers import CondConv2d, get_condconv_initializer, get_act_layer, get_attn, make_divisible, LayerType from ._efficientnet_blocks import * from ._manipulate import named_modules __all__ = ["EfficientNetBuilder", "BlockArgs", "decode_arch_def", "efficientnet_init_weights", 'resolve_bn_args', 'resolve_act_layer', 'round_channels', 'BN_MOMENTUM_TF_DEFAULT', 'BN_EPS_TF_DEFAULT'] _logger = logging.getLogger(__name__) _DEBUG_BUILDER = False # Defaults used for Google/Tensorflow training of mobile networks /w RMSprop as per # papers and TF reference implementations. PT momentum equiv for TF decay is (1 - TF decay) # NOTE: momentum varies btw .99 and .9997 depending on source # .99 in official TF TPU impl # .9997 (/w .999 in search space) for paper BN_MOMENTUM_TF_DEFAULT = 1 - 0.99 BN_EPS_TF_DEFAULT = 1e-3 _BN_ARGS_TF = dict(momentum=BN_MOMENTUM_TF_DEFAULT, eps=BN_EPS_TF_DEFAULT) BlockArgs = List[List[Dict[str, Any]]] def get_bn_args_tf(): return _BN_ARGS_TF.copy() def resolve_bn_args(kwargs): bn_args = {} bn_momentum = kwargs.pop('bn_momentum', None) if bn_momentum is not None: bn_args['momentum'] = bn_momentum bn_eps = kwargs.pop('bn_eps', None) if bn_eps is not None: bn_args['eps'] = bn_eps return bn_args def resolve_act_layer(kwargs, default='relu'): return get_act_layer(kwargs.pop('act_layer', default)) def round_channels(channels, multiplier=1.0, divisor=8, channel_min=None, round_limit=0.9): """Round number of filters based on depth multiplier.""" if not multiplier: return channels return make_divisible(channels * multiplier, divisor, channel_min, round_limit=round_limit) def _log_info_if(msg, condition): if condition: _logger.info(msg) def _parse_ksize(ss): if ss.isdigit(): return int(ss) else: return [int(k) for k in ss.split('.')] def _decode_block_str(block_str): """ Decode block definition string Gets a list of block arg (dicts) through a string notation of arguments. E.g. ir_r2_k3_s2_e1_i32_o16_se0.25_noskip All args can exist in any order with the exception of the leading string which is assumed to indicate the block type. leading string - block type ( ir = InvertedResidual, ds = DepthwiseSep, dsa = DeptwhiseSep with pw act, cn = ConvBnAct) r - number of repeat blocks, k - kernel size, s - strides (1-9), e - expansion ratio, c - output channels, se - squeeze/excitation ratio n - activation fn ('re', 'r6', 'hs', or 'sw') Args: block_str: a string representation of block arguments. 
Returns: A list of block args (dicts) Raises: ValueError: if the string def not properly specified (TODO) """ assert isinstance(block_str, str) ops = block_str.split('_') block_type = ops[0] # take the block type off the front ops = ops[1:] options = {} skip = None for op in ops: # string options being checked on individual basis, combine if they grow if op == 'noskip': skip = False # force no skip connection elif op == 'skip': skip = True # force a skip connection elif op.startswith('n'): # activation fn key = op[0] v = op[1:] if v == 're': value = get_act_layer('relu') elif v == 'r6': value = get_act_layer('relu6') elif v == 'hs': value = get_act_layer('hard_swish') elif v == 'sw': value = get_act_layer('swish') # aka SiLU elif v == 'mi': value = get_act_layer('mish') else: continue options[key] = value else: # all numeric options splits = re.split(r'(\d.*)', op) if len(splits) >= 2: key, value = splits[:2] options[key] = value # if act_layer is None, the model default (passed to model init) will be used act_layer = options['n'] if 'n' in options else None start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 1 end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 1 force_in_chs = int(options['fc']) if 'fc' in options else 0 # FIXME hack to deal with in_chs issue in TPU def num_repeat = int(options['r']) # each type of block has different valid arguments, fill accordingly block_args = dict( block_type=block_type, out_chs=int(options['c']), stride=int(options['s']), act_layer=act_layer, ) if block_type == 'ir': block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), exp_kernel_size=start_kernel_size, pw_kernel_size=end_kernel_size, exp_ratio=float(options['e']), se_ratio=float(options.get('se', 0.)), noskip=skip is False, s2d=int(options.get('d', 0)) > 0, )) if 'cc' in options: block_args['num_experts'] = int(options['cc']) elif block_type == 'ds' or block_type == 'dsa': block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), pw_kernel_size=end_kernel_size, se_ratio=float(options.get('se', 0.)), pw_act=block_type == 'dsa', noskip=block_type == 'dsa' or skip is False, s2d=int(options.get('d', 0)) > 0, )) elif block_type == 'er': block_args.update(dict( exp_kernel_size=_parse_ksize(options['k']), pw_kernel_size=end_kernel_size, exp_ratio=float(options['e']), force_in_chs=force_in_chs, se_ratio=float(options.get('se', 0.)), noskip=skip is False, )) elif block_type == 'cn': block_args.update(dict( kernel_size=int(options['k']), skip=skip is True, )) elif block_type == 'uir': # override exp / proj kernels for start/end in uir block start_kernel_size = _parse_ksize(options['a']) if 'a' in options else 0 end_kernel_size = _parse_ksize(options['p']) if 'p' in options else 0 block_args.update(dict( dw_kernel_size_start=start_kernel_size, # overload exp ks arg for dw start dw_kernel_size_mid=_parse_ksize(options['k']), dw_kernel_size_end=end_kernel_size, # overload pw ks arg for dw end exp_ratio=float(options['e']), se_ratio=float(options.get('se', 0.)), noskip=skip is False, )) elif block_type == 'mha': kv_dim = int(options['d']) block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), num_heads=int(options['h']), key_dim=kv_dim, value_dim=kv_dim, kv_stride=int(options.get('v', 1)), noskip=skip is False, )) elif block_type == 'mqa': kv_dim = int(options['d']) block_args.update(dict( dw_kernel_size=_parse_ksize(options['k']), num_heads=int(options['h']), key_dim=kv_dim, value_dim=kv_dim, kv_stride=int(options.get('v', 1)), noskip=skip is 
False, )) else: assert False, 'Unknown block type (%s)' % block_type if 'gs' in options: block_args['group_size'] = int(options['gs']) return block_args, num_repeat def _scale_stage_depth(stack_args, repeats, depth_multiplier=1.0, depth_trunc='ceil'): """ Per-stage depth scaling Scales the block repeats in each stage. This depth scaling impl maintains compatibility with the EfficientNet scaling method, while allowing sensible scaling for other models that may have multiple block arg definitions in each stage. """ # We scale the total repeat count for each stage, there may be multiple # block arg defs per stage so we need to sum. num_repeat = sum(repeats) if depth_trunc == 'round': # Truncating to int by rounding allows stages with few repeats to remain # proportionally smaller for longer. This is a good choice when stage definitions # include single repeat stages that we'd prefer to keep that way as long as possible num_repeat_scaled = max(1, round(num_repeat * depth_multiplier)) else: # The default for EfficientNet truncates repeats to int via 'ceil'. # Any multiplier > 1.0 will result in an increased depth for every stage. num_repeat_scaled = int(math.ceil(num_repeat * depth_multiplier)) # Proportionally distribute repeat count scaling to each block definition in the stage. # Allocation is done in reverse as it results in the first block being less likely to be scaled. # The first block makes less sense to repeat in most of the arch definitions. repeats_scaled = [] for r in repeats[::-1]: rs = max(1, round((r / num_repeat * num_repeat_scaled))) repeats_scaled.append(rs) num_repeat -= r num_repeat_scaled -= rs repeats_scaled = repeats_scaled[::-1] # Apply the calculated scaling to each block arg in the stage sa_scaled = [] for ba, rep in zip(stack_args, repeats_scaled): sa_scaled.extend([deepcopy(ba) for _ in range(rep)]) return sa_scaled def decode_arch_def( arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False, group_size=None, ): """ Decode block architecture definition strings -> block kwargs Args: arch_def: architecture definition strings, list of list of strings depth_multiplier: network depth multiplier depth_trunc: networ depth truncation mode when applying multiplier experts_multiplier: CondConv experts multiplier fix_first_last: fix first and last block depths when multiplier is applied group_size: group size override for all blocks that weren't explicitly set in arch string Returns: list of list of block kwargs """ arch_args = [] if isinstance(depth_multiplier, tuple): assert len(depth_multiplier) == len(arch_def) else: depth_multiplier = (depth_multiplier,) * len(arch_def) for stack_idx, (block_strings, multiplier) in enumerate(zip(arch_def, depth_multiplier)): assert isinstance(block_strings, list) stack_args = [] repeats = [] for block_str in block_strings: assert isinstance(block_str, str) ba, rep = _decode_block_str(block_str) if ba.get('num_experts', 0) > 0 and experts_multiplier > 1: ba['num_experts'] *= experts_multiplier if group_size is not None: ba.setdefault('group_size', group_size) stack_args.append(ba) repeats.append(rep) if fix_first_last and (stack_idx == 0 or stack_idx == len(arch_def) - 1): arch_args.append(_scale_stage_depth(stack_args, repeats, 1.0, depth_trunc)) else: arch_args.append(_scale_stage_depth(stack_args, repeats, multiplier, depth_trunc)) return arch_args class EfficientNetBuilder: """ Build Trunk Blocks This ended up being somewhat of a cross between 
https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_models.py and https://github.com/facebookresearch/maskrcnn-benchmark/blob/master/maskrcnn_benchmark/modeling/backbone/fbnet_builder.py """ def __init__( self, output_stride: int = 32, pad_type: str = '', round_chs_fn: Callable = round_channels, se_from_exp: bool = False, act_layer: Optional[LayerType] = None, norm_layer: Optional[LayerType] = None, aa_layer: Optional[LayerType] = None, se_layer: Optional[LayerType] = None, drop_path_rate: float = 0., layer_scale_init_value: Optional[float] = None, feature_location: str = '', ): self.output_stride = output_stride self.pad_type = pad_type self.round_chs_fn = round_chs_fn self.se_from_exp = se_from_exp # calculate se channel reduction from expanded (mid) chs self.act_layer = act_layer self.norm_layer = norm_layer self.aa_layer = aa_layer self.se_layer = get_attn(se_layer) try: self.se_layer(8, rd_ratio=1.0) # test if attn layer accepts rd_ratio arg self.se_has_ratio = True except TypeError: self.se_has_ratio = False self.drop_path_rate = drop_path_rate self.layer_scale_init_value = layer_scale_init_value if feature_location == 'depthwise': # old 'depthwise' mode renamed 'expansion' to match TF impl, old expansion mode didn't make sense _logger.warning("feature_location=='depthwise' is deprecated, using 'expansion'") feature_location = 'expansion' self.feature_location = feature_location assert feature_location in ('bottleneck', 'expansion', '') self.verbose = _DEBUG_BUILDER # state updated during build, consumed by model self.in_chs = None self.features = [] def _make_block(self, ba, block_idx, block_count): drop_path_rate = self.drop_path_rate * block_idx / block_count bt = ba.pop('block_type') ba['in_chs'] = self.in_chs ba['out_chs'] = self.round_chs_fn(ba['out_chs']) s2d = ba.get('s2d', 0) if s2d > 0: # adjust while space2depth active ba['out_chs'] *= 4 if 'force_in_chs' in ba and ba['force_in_chs']: # NOTE this is a hack to work around mismatch in TF EdgeEffNet impl ba['force_in_chs'] = self.round_chs_fn(ba['force_in_chs']) ba['pad_type'] = self.pad_type # block act fn overrides the model default ba['act_layer'] = ba['act_layer'] if ba['act_layer'] is not None else self.act_layer assert ba['act_layer'] is not None ba['norm_layer'] = self.norm_layer ba['drop_path_rate'] = drop_path_rate if self.aa_layer is not None: ba['aa_layer'] = self.aa_layer se_ratio = ba.pop('se_ratio', None) if se_ratio and self.se_layer is not None: if not self.se_from_exp: # adjust se_ratio by expansion ratio if calculating se channels from block input se_ratio /= ba.get('exp_ratio', 1.0) if s2d == 1: # adjust for start of space2depth se_ratio /= 4 if self.se_has_ratio: ba['se_layer'] = partial(self.se_layer, rd_ratio=se_ratio) else: ba['se_layer'] = self.se_layer if bt == 'ir': _log_info_if(' InvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = CondConvResidual(**ba) if ba.get('num_experts', 0) else InvertedResidual(**ba) elif bt == 'ds' or bt == 'dsa': _log_info_if(' DepthwiseSeparable {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = DepthwiseSeparableConv(**ba) elif bt == 'er': _log_info_if(' EdgeResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = EdgeResidual(**ba) elif bt == 'cn': _log_info_if(' ConvBnAct {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = ConvBnAct(**ba) elif bt == 'uir': _log_info_if(' UniversalInvertedResidual {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = 
UniversalInvertedResidual(**ba, layer_scale_init_value=self.layer_scale_init_value) elif bt == 'mqa': _log_info_if(' MobileMultiQueryAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = MobileAttention(**ba, use_multi_query=True, layer_scale_init_value=self.layer_scale_init_value) elif bt == 'mha': _log_info_if(' MobileMultiHeadAttention {}, Args: {}'.format(block_idx, str(ba)), self.verbose) block = MobileAttention(**ba, layer_scale_init_value=self.layer_scale_init_value) else: assert False, 'Unknown block type (%s) while building model.' % bt self.in_chs = ba['out_chs'] # update in_chs for arg of next block return block def __call__(self, in_chs, model_block_args): """ Build the blocks Args: in_chs: Number of input-channels passed to first block model_block_args: A list of lists, outer list defines stages, inner list contains strings defining block configuration(s) Return: List of block stacks (each stack wrapped in nn.Sequential) """ _log_info_if('Building model trunk with %d stages...' % len(model_block_args), self.verbose) self.in_chs = in_chs total_block_count = sum([len(x) for x in model_block_args]) total_block_idx = 0 current_stride = 2 current_dilation = 1 stages = [] if model_block_args[0][0]['stride'] > 1: # if the first block starts with a stride, we need to extract first level feat from stem feature_info = dict(module='bn1', num_chs=in_chs, stage=0, reduction=current_stride) self.features.append(feature_info) # outer list of block_args defines the stacks space2depth = 0 for stack_idx, stack_args in enumerate(model_block_args): last_stack = stack_idx + 1 == len(model_block_args) _log_info_if('Stack: {}'.format(stack_idx), self.verbose) assert isinstance(stack_args, list) blocks = [] # each stack (stage of blocks) contains a list of block arguments for block_idx, block_args in enumerate(stack_args): last_block = block_idx + 1 == len(stack_args) _log_info_if(' Block: {}'.format(block_idx), self.verbose) assert block_args['stride'] in (1, 2) if block_idx >= 1: # only the first block in any stack can have a stride > 1 block_args['stride'] = 1 if not space2depth and block_args.pop('s2d', False): assert block_args['stride'] == 1 space2depth = 1 if space2depth > 0: # FIXME s2d is a WIP if space2depth == 2 and block_args['stride'] == 2: block_args['stride'] = 1 # to end s2d region, need to correct expansion and se ratio relative to input block_args['exp_ratio'] /= 4 space2depth = 0 else: block_args['s2d'] = space2depth extract_features = False if last_block: next_stack_idx = stack_idx + 1 extract_features = next_stack_idx >= len(model_block_args) or \ model_block_args[next_stack_idx][0]['stride'] > 1 next_dilation = current_dilation if block_args['stride'] > 1: next_output_stride = current_stride * block_args['stride'] if next_output_stride > self.output_stride: next_dilation = current_dilation * block_args['stride'] block_args['stride'] = 1 _log_info_if(' Converting stride to dilation to maintain output_stride=={}'.format( self.output_stride), self.verbose) else: current_stride = next_output_stride block_args['dilation'] = current_dilation if next_dilation != current_dilation: current_dilation = next_dilation # create the block block = self._make_block(block_args, total_block_idx, total_block_count) blocks.append(block) if space2depth == 1: space2depth = 2 # stash feature module name and channel info for model feature extraction if extract_features: feature_info = dict( stage=stack_idx + 1, reduction=current_stride, **block.feature_info(self.feature_location), ) 
leaf_name = feature_info.get('module', '') if leaf_name: feature_info['module'] = '.'.join([f'blocks.{stack_idx}.{block_idx}', leaf_name]) else: assert last_block feature_info['module'] = f'blocks.{stack_idx}' self.features.append(feature_info) total_block_idx += 1 # incr global block idx (across all stacks) stages.append(nn.Sequential(*blocks)) return stages def _init_weight_goog(m, n='', fix_group_fanout=True): """ Weight initialization as per Tensorflow official implementations. Args: m (nn.Module): module to init n (str): module name fix_group_fanout (bool): enable correct (matching Tensorflow TPU impl) fanout calculation w/ group convs Handles layers in EfficientNet, EfficientNet-CondConv, MixNet, MnasNet, MobileNetV3, etc: * https://github.com/tensorflow/tpu/blob/master/models/official/mnasnet/mnasnet_model.py * https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/efficientnet_model.py """ if isinstance(m, CondConv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups init_weight_fn = get_condconv_initializer( lambda w: nn.init.normal_(w, 0, math.sqrt(2.0 / fan_out)), m.num_experts, m.weight_shape) init_weight_fn(m.weight) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.Conv2d): fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels if fix_group_fanout: fan_out //= m.groups nn.init.normal_(m.weight, 0, math.sqrt(2.0 / fan_out)) if m.bias is not None: nn.init.zeros_(m.bias) elif isinstance(m, nn.BatchNorm2d): nn.init.ones_(m.weight) nn.init.zeros_(m.bias) elif isinstance(m, nn.Linear): fan_out = m.weight.size(0) # fan-out fan_in = 0 if 'routing_fn' in n: fan_in = m.weight.size(1) init_range = 1.0 / math.sqrt(fan_in + fan_out) nn.init.uniform_(m.weight, -init_range, init_range) nn.init.zeros_(m.bias) def efficientnet_init_weights(model: nn.Module, init_fn=None): init_fn = init_fn or _init_weight_goog for n, m in model.named_modules(): init_fn(m, n) # iterate and call any module.init_weights() fn, children first for n, m in named_modules(model): if hasattr(m, 'init_weights'): m.init_weights()
pytorch-image-models/timm/models/_efficientnet_builder.py/0
{ "file_path": "pytorch-image-models/timm/models/_efficientnet_builder.py", "repo_id": "pytorch-image-models", "token_count": 10990 }
268
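# Usage sketch (import path assumes timm >= 0.9, where the builder moved to the
# private _efficientnet_builder module): decoding block definition strings.
# 'ir_r2_k3_s2_e6_c24_se0.25' reads: inverted residual, repeat 2, kernel 3,
# stride 2, expansion ratio 6, 24 output channels, SE ratio 0.25.
from timm.models._efficientnet_builder import decode_arch_def

arch_def = [
    ['ds_r1_k3_s1_e1_c16'],         # stage 0: one depthwise-separable block
    ['ir_r2_k3_s2_e6_c24_se0.25'],  # stage 1: two inverted residual blocks
]
block_args = decode_arch_def(arch_def, depth_multiplier=2.0)
print([len(stage) for stage in block_args])  # [2, 4] -- repeats scaled by the multiplier
print(block_args[1][0]['exp_ratio'], block_args[1][0]['se_ratio'])  # 6.0 0.25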
""" Bring-Your-Own-Attention Network A flexible network w/ dataclass based config for stacking NN blocks including self-attention (or similar) layers. Currently used to implement experimental variants of: * Bottleneck Transformers * Lambda ResNets * HaloNets Consider all of the models definitions here as experimental WIP and likely to change. Hacked together by / copyright Ross Wightman, 2021. """ from typing import Any, Dict, Optional from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .byobnet import ByoBlockCfg, ByoModelCfg, ByobNet, interleave_blocks __all__ = [] model_cfgs = dict( botnet26t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', fixed_input_size=True, self_attn_layer='bottleneck', self_attn_kwargs=dict() ), sebotnet33ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', num_features=1280, attn_layer='se', self_attn_layer='bottleneck', self_attn_kwargs=dict() ), botnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', fixed_input_size=True, self_attn_layer='bottleneck', self_attn_kwargs=dict() ), eca_botnext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', fixed_input_size=True, act_layer='silu', attn_layer='eca', self_attn_layer='bottleneck', self_attn_kwargs=dict(dim_head=16) ), halonet_h1=ByoModelCfg( blocks=( ByoBlockCfg(type='self_attn', d=3, c=64, s=1, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=3, c=128, s=2, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=10, c=256, s=2, gs=0, br=1.0), ByoBlockCfg(type='self_attn', d=3, c=512, s=2, gs=0, br=1.0), ), stem_chs=64, stem_type='7x7', stem_pool='maxpool', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3), ), halonet26t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=2) ), sehalonet33ts=ByoModelCfg( blocks=( 
ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=[2], d=3, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg('self_attn', d=2, c=1536, s=2, gs=0, br=0.333), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', num_features=1280, attn_layer='se', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3) ), halonet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks( types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3, num_heads=4)), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=3) ), eca_halonext26ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=16, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=16, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=16, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=16, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', attn_layer='eca', self_attn_layer='halo', self_attn_kwargs=dict(block_size=8, halo_size=2, dim_head=16) ), lambda_resnet26t=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='lambda', self_attn_kwargs=dict(r=9) ), lambda_resnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), every=4, d=4, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', act_layer='silu', self_attn_layer='lambda', self_attn_kwargs=dict(r=9) ), lambda_resnet26rpt_256=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=256, s=1, gs=0, br=0.25), ByoBlockCfg(type='bottle', d=2, c=512, s=2, gs=0, br=0.25), interleave_blocks(types=('bottle', 'self_attn'), d=2, c=1024, s=2, gs=0, br=0.25), ByoBlockCfg(type='self_attn', d=2, c=2048, s=2, gs=0, br=0.25), ), stem_chs=64, stem_type='tiered', stem_pool='maxpool', self_attn_layer='lambda', self_attn_kwargs=dict(r=None) ), # experimental haloregnetz_b=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=2, c=48, s=2, gs=16, br=3), ByoBlockCfg(type='bottle', d=6, c=96, s=2, gs=16, br=3), interleave_blocks(types=('bottle', 'self_attn'), every=3, d=12, c=192, s=2, gs=16, br=3), ByoBlockCfg('self_attn', d=2, c=288, s=2, gs=16, br=3), ), stem_chs=32, stem_pool='', downsample='', num_features=1536, act_layer='silu', attn_layer='se', attn_kwargs=dict(rd_ratio=0.25), block_kwargs=dict(bottle_in=True, linear_out=True), self_attn_layer='halo', self_attn_kwargs=dict(block_size=7, halo_size=2, qk_ratio=0.33) ), # experimental lamhalobotnet50ts=ByoModelCfg( blocks=( 
ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks( types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='lambda', self_attn_kwargs=dict(r=13)), interleave_blocks( types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks( types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, self_attn_layer='bottleneck', self_attn_kwargs=dict()), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', ), halo2botnet50ts=ByoModelCfg( blocks=( ByoBlockCfg(type='bottle', d=3, c=256, s=1, gs=0, br=0.25), interleave_blocks( types=('bottle', 'self_attn'), d=4, c=512, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks( types=('bottle', 'self_attn'), d=6, c=1024, s=2, gs=0, br=0.25, self_attn_layer='halo', self_attn_kwargs=dict(halo_size=3)), interleave_blocks( types=('bottle', 'self_attn'), d=3, c=2048, s=2, gs=0, br=0.25, self_attn_layer='bottleneck', self_attn_kwargs=dict()), ), stem_chs=64, stem_type='tiered', stem_pool='', act_layer='silu', ), ) def _create_byoanet(variant: str, cfg_variant: Optional[str] = None, pretrained: bool = False, **kwargs) -> ByobNet: """Create a Bring-Your-Own-Attention network model. Args: variant: Model variant name. cfg_variant: Config variant name if different from model variant. pretrained: Load pretrained weights. **kwargs: Additional model arguments. Returns: Instantiated ByobNet model. """ return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs, ) def _cfg(url: str = '', **kwargs) -> Dict[str, Any]: """Generate default model configuration. Args: url: URL for pretrained weights. **kwargs: Override default configuration values. Returns: Model configuration dictionary. 
""" return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'stem.conv1.conv', 'classifier': 'head.fc', 'fixed_input_size': False, 'min_input_size': (3, 224, 224), **kwargs } default_cfgs = generate_default_cfgs({ # GPU-Efficient (ResNet) weights 'botnet26t_256.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/botnet26t_c1_256-167a0e9f.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'sebotnet33ts_256.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sebotnet33ts_a1h2_256-957e3c3e.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'botnet50ts_256.untrained': _cfg( fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'eca_botnext26ts_256.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_botnext26ts_c_256-95a898f6.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'halonet_h1.untrained': _cfg(input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), 'halonet26t.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet26t_a1h_256-3083328c.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256)), 'sehalonet33ts.ra2_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/sehalonet33ts_256-87e053f9.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'halonet50ts.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halonet50ts_a1h2_256-f3a3daee.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'eca_halonext26ts.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/eca_halonext26ts_c_256-06906299.pth', hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8), min_input_size=(3, 256, 256), crop_pct=0.94), 'lambda_resnet26t.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26t_c_256-e5a5c857.pth', hf_hub_id='timm/', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'lambda_resnet50ts.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet50ts_a1h_256-b87370f7.pth', hf_hub_id='timm/', min_input_size=(3, 128, 128), input_size=(3, 256, 256), pool_size=(8, 8)), 'lambda_resnet26rpt_256.c1_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lambda_resnet26rpt_c_256-ab00292d.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8), crop_pct=0.94), 'haloregnetz_b.ra3_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/haloregnetz_c_raa_256-c8ad7616.pth', hf_hub_id='timm/', mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), first_conv='stem.conv', input_size=(3, 224, 224), pool_size=(7, 7), min_input_size=(3, 224, 224), 
crop_pct=0.94), 'lamhalobotnet50ts_256.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/lamhalobotnet50ts_a1h2_256-fe3d9445.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), 'halo2botnet50ts_256.a1h_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-attn-weights/halo2botnet50ts_a1h2_256-fd9c11a3.pth', hf_hub_id='timm/', fixed_input_size=True, input_size=(3, 256, 256), pool_size=(8, 8)), }) @register_model def botnet26t_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ ResNet26-T backbone. """ kwargs.setdefault('img_size', 256) return _create_byoanet('botnet26t_256', 'botnet26t', pretrained=pretrained, **kwargs) @register_model def sebotnet33ts_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, """ return _create_byoanet('sebotnet33ts_256', 'sebotnet33ts', pretrained=pretrained, **kwargs) @register_model def botnet50ts_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ ResNet50-T backbone, silu act. """ kwargs.setdefault('img_size', 256) return _create_byoanet('botnet50ts_256', 'botnet50ts', pretrained=pretrained, **kwargs) @register_model def eca_botnext26ts_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Bottleneck Transformer w/ ResNet26-T backbone, silu act. """ kwargs.setdefault('img_size', 256) return _create_byoanet('eca_botnext26ts_256', 'eca_botnext26ts', pretrained=pretrained, **kwargs) @register_model def halonet_h1(pretrained: bool = False, **kwargs) -> ByobNet: """ HaloNet-H1. Halo attention in all stages as per the paper. NOTE: This runs very slowly! """ return _create_byoanet('halonet_h1', pretrained=pretrained, **kwargs) @register_model def halonet26t(pretrained: bool = False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet26-t backbone. Halo attention in final two stages """ return _create_byoanet('halonet26t', pretrained=pretrained, **kwargs) @register_model def sehalonet33ts(pretrained: bool = False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet33-t backbone, SE attn for non Halo blocks, SiLU, 1-2 Halo in stage 2,3,4. """ return _create_byoanet('sehalonet33ts', pretrained=pretrained, **kwargs) @register_model def halonet50ts(pretrained: bool = False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet50-t backbone, silu act. Halo attention in final two stages """ return _create_byoanet('halonet50ts', pretrained=pretrained, **kwargs) @register_model def eca_halonext26ts(pretrained: bool = False, **kwargs) -> ByobNet: """ HaloNet w/ a ResNet26-t backbone, silu act. Halo attention in final two stages """ return _create_byoanet('eca_halonext26ts', pretrained=pretrained, **kwargs) @register_model def lambda_resnet26t(pretrained: bool = False, **kwargs) -> ByobNet: """ Lambda-ResNet-26-T. Lambda layers w/ conv pos in last two stages. """ return _create_byoanet('lambda_resnet26t', pretrained=pretrained, **kwargs) @register_model def lambda_resnet50ts(pretrained: bool = False, **kwargs) -> ByobNet: """ Lambda-ResNet-50-TS. SiLU act. Lambda layers w/ conv pos in last two stages. """ return _create_byoanet('lambda_resnet50ts', pretrained=pretrained, **kwargs) @register_model def lambda_resnet26rpt_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Lambda-ResNet-26-R-T. Lambda layers w/ rel pos embed in last two stages. 
""" kwargs.setdefault('img_size', 256) return _create_byoanet('lambda_resnet26rpt_256', pretrained=pretrained, **kwargs) @register_model def haloregnetz_b(pretrained: bool = False, **kwargs) -> ByobNet: """ Halo + RegNetZ """ return _create_byoanet('haloregnetz_b', pretrained=pretrained, **kwargs) @register_model def lamhalobotnet50ts_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Combo Attention (Lambda + Halo + Bot) Network """ return _create_byoanet('lamhalobotnet50ts_256', 'lamhalobotnet50ts', pretrained=pretrained, **kwargs) @register_model def halo2botnet50ts_256(pretrained: bool = False, **kwargs) -> ByobNet: """ Combo Attention (Halo + Halo + Bot) Network """ return _create_byoanet('halo2botnet50ts_256', 'halo2botnet50ts', pretrained=pretrained, **kwargs)
pytorch-image-models/timm/models/byoanet.py/0
{ "file_path": "pytorch-image-models/timm/models/byoanet.py", "repo_id": "pytorch-image-models", "token_count": 9964 }
269
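# Usage sketch (assumes timm is installed, which registers these variants): the
# fixed_input_size configs such as botnet26t_256 bake their relative position
# tables for one resolution, so feed exactly the size the config declares;
# variants without fixed_input_size (e.g. halonet26t) tolerate other sizes down
# to their min_input_size.
import torch
import timm

model = timm.create_model('botnet26t_256', pretrained=False, num_classes=10)
out = model(torch.randn(1, 3, 256, 256))
print(out.shape)  # torch.Size([1, 10])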
""" EfficientFormer-V2 @article{ li2022rethinking, title={Rethinking Vision Transformers for MobileNet Size and Speed}, author={Li, Yanyu and Hu, Ju and Wen, Yang and Evangelidis, Georgios and Salahi, Kamyar and Wang, Yanzhi and Tulyakov, Sergey and Ren, Jian}, journal={arXiv preprint arXiv:2212.08059}, year={2022} } Significantly refactored and cleaned up for timm from original at: https://github.com/snap-research/EfficientFormer Original code licensed Apache 2.0, Copyright (c) 2022 Snap Inc. Modifications and timm support by / Copyright 2023, Ross Wightman """ import math from functools import partial from typing import Dict, List, Optional, Tuple, Union import torch import torch.nn as nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import create_conv2d, create_norm_layer, get_act_layer, get_norm_layer, ConvNormAct from timm.layers import DropPath, trunc_normal_, to_2tuple, to_ntuple, ndgrid from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint_seq from ._registry import generate_default_cfgs, register_model __all__ = ['EfficientFormerV2'] EfficientFormer_width = { 'L': (40, 80, 192, 384), # 26m 83.3% 6attn 'S2': (32, 64, 144, 288), # 12m 81.6% 4attn dp0.02 'S1': (32, 48, 120, 224), # 6.1m 79.0 'S0': (32, 48, 96, 176), # 75.0 75.7 } EfficientFormer_depth = { 'L': (5, 5, 15, 10), # 26m 83.3% 'S2': (4, 4, 12, 8), # 12m 'S1': (3, 3, 9, 6), # 79.0 'S0': (2, 2, 6, 4), # 75.7 } EfficientFormer_expansion_ratios = { 'L': (4, 4, (4, 4, 4, 4, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 4, 3, 3, 3, 3, 4, 4, 4)), 'S2': (4, 4, (4, 4, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4), (4, 4, 3, 3, 3, 3, 4, 4)), 'S1': (4, 4, (4, 4, 3, 3, 3, 3, 4, 4, 4), (4, 4, 3, 3, 4, 4)), 'S0': (4, 4, (4, 3, 3, 3, 4, 4), (4, 3, 3, 4)), } class ConvNorm(nn.Module): def __init__( self, in_channels, out_channels, kernel_size=1, stride=1, padding='', dilation=1, groups=1, bias=True, norm_layer='batchnorm2d', norm_kwargs=None, ): norm_kwargs = norm_kwargs or {} super(ConvNorm, self).__init__() self.conv = create_conv2d( in_channels, out_channels, kernel_size, stride=stride, padding=padding, dilation=dilation, groups=groups, bias=bias, ) self.bn = create_norm_layer(norm_layer, out_channels, **norm_kwargs) def forward(self, x): x = self.conv(x) x = self.bn(x) return x class Attention2d(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=32, num_heads=8, attn_ratio=4, resolution=7, act_layer=nn.GELU, stride=None, ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim resolution = to_2tuple(resolution) if stride is not None: resolution = tuple([math.ceil(r / stride) for r in resolution]) self.stride_conv = ConvNorm(dim, dim, kernel_size=3, stride=stride, groups=dim) self.upsample = nn.Upsample(scale_factor=stride, mode='bilinear') else: self.stride_conv = None self.upsample = None self.resolution = resolution self.N = self.resolution[0] * self.resolution[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio kh = self.key_dim * self.num_heads self.q = ConvNorm(dim, kh) self.k = ConvNorm(dim, kh) self.v = ConvNorm(dim, self.dh) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, groups=self.dh) self.talking_head1 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.talking_head2 = nn.Conv2d(self.num_heads, self.num_heads, kernel_size=1) self.act = act_layer() self.proj = 
ConvNorm(self.dh, dim, 1) pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) rel_pos = (pos[..., :, None] - pos[..., None, :]).abs() rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, self.N)) self.register_buffer('attention_bias_idxs', torch.LongTensor(rel_pos), persistent=False) self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape if self.stride_conv is not None: x = self.stride_conv(x) q = self.q(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = (q @ k) * self.scale attn = attn + self.get_attention_biases(x.device) attn = self.talking_head1(attn) attn = attn.softmax(dim=-1) attn = self.talking_head2(attn) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution[0], self.resolution[1]) + v_local if self.upsample is not None: x = self.upsample(x) x = self.act(x) x = self.proj(x) return x class LocalGlobalQuery(torch.nn.Module): def __init__(self, in_dim, out_dim): super().__init__() self.pool = nn.AvgPool2d(1, 2, 0) self.local = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1, groups=in_dim) self.proj = ConvNorm(in_dim, out_dim, 1) def forward(self, x): local_q = self.local(x) pool_q = self.pool(x) q = local_q + pool_q q = self.proj(q) return q class Attention2dDownsample(torch.nn.Module): attention_bias_cache: Dict[str, torch.Tensor] def __init__( self, dim=384, key_dim=16, num_heads=8, attn_ratio=4, resolution=7, out_dim=None, act_layer=nn.GELU, ): super().__init__() self.num_heads = num_heads self.scale = key_dim ** -0.5 self.key_dim = key_dim self.resolution = to_2tuple(resolution) self.resolution2 = tuple([math.ceil(r / 2) for r in self.resolution]) self.N = self.resolution[0] * self.resolution[1] self.N2 = self.resolution2[0] * self.resolution2[1] self.d = int(attn_ratio * key_dim) self.dh = int(attn_ratio * key_dim) * num_heads self.attn_ratio = attn_ratio self.out_dim = out_dim or dim kh = self.key_dim * self.num_heads self.q = LocalGlobalQuery(dim, kh) self.k = ConvNorm(dim, kh, 1) self.v = ConvNorm(dim, self.dh, 1) self.v_local = ConvNorm(self.dh, self.dh, kernel_size=3, stride=2, groups=self.dh) self.act = act_layer() self.proj = ConvNorm(self.dh, self.out_dim, 1) self.attention_biases = nn.Parameter(torch.zeros(num_heads, self.N)) k_pos = torch.stack(ndgrid(torch.arange(self.resolution[0]), torch.arange(self.resolution[1]))).flatten(1) q_pos = torch.stack(ndgrid( torch.arange(0, self.resolution[0], step=2), torch.arange(0, self.resolution[1], step=2) )).flatten(1) rel_pos = (q_pos[..., :, None] - k_pos[..., None, :]).abs() rel_pos = (rel_pos[0] * self.resolution[1]) + rel_pos[1] self.register_buffer('attention_bias_idxs', 
rel_pos, persistent=False) self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat) @torch.no_grad() def train(self, mode=True): super().train(mode) if mode and self.attention_bias_cache: self.attention_bias_cache = {} # clear ab cache def get_attention_biases(self, device: torch.device) -> torch.Tensor: if torch.jit.is_tracing() or self.training: return self.attention_biases[:, self.attention_bias_idxs] else: device_key = str(device) if device_key not in self.attention_bias_cache: self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs] return self.attention_bias_cache[device_key] def forward(self, x): B, C, H, W = x.shape q = self.q(x).reshape(B, self.num_heads, -1, self.N2).permute(0, 1, 3, 2) k = self.k(x).reshape(B, self.num_heads, -1, self.N).permute(0, 1, 2, 3) v = self.v(x) v_local = self.v_local(v) v = v.reshape(B, self.num_heads, -1, self.N).permute(0, 1, 3, 2) attn = (q @ k) * self.scale attn = attn + self.get_attention_biases(x.device) attn = attn.softmax(dim=-1) x = (attn @ v).transpose(2, 3) x = x.reshape(B, self.dh, self.resolution2[0], self.resolution2[1]) + v_local x = self.act(x) x = self.proj(x) return x class Downsample(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=3, stride=2, padding=1, resolution=7, use_attn=False, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, ): super().__init__() kernel_size = to_2tuple(kernel_size) stride = to_2tuple(stride) padding = to_2tuple(padding) norm_layer = norm_layer or nn.Identity() self.conv = ConvNorm( in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding, norm_layer=norm_layer, ) if use_attn: self.attn = Attention2dDownsample( dim=in_chs, out_dim=out_chs, resolution=resolution, act_layer=act_layer, ) else: self.attn = None def forward(self, x): out = self.conv(x) if self.attn is not None: return self.attn(x) + out return out class ConvMlpWithNorm(nn.Module): """ Implementation of MLP with 1*1 convolutions. 
Input: tensor with shape [B, C, H, W] """ def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, drop=0., mid_conv=False, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = ConvNormAct( in_features, hidden_features, 1, bias=True, norm_layer=norm_layer, act_layer=act_layer) if mid_conv: self.mid = ConvNormAct( hidden_features, hidden_features, 3, groups=hidden_features, bias=True, norm_layer=norm_layer, act_layer=act_layer) else: self.mid = nn.Identity() self.drop1 = nn.Dropout(drop) self.fc2 = ConvNorm(hidden_features, out_features, 1, norm_layer=norm_layer) self.drop2 = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.mid(x) x = self.drop1(x) x = self.fc2(x) x = self.drop2(x) return x class LayerScale2d(nn.Module): def __init__(self, dim, init_values=1e-5, inplace=False): super().__init__() self.inplace = inplace self.gamma = nn.Parameter(init_values * torch.ones(dim)) def forward(self, x): gamma = self.gamma.view(1, -1, 1, 1) return x.mul_(gamma) if self.inplace else x * gamma class EfficientFormerV2Block(nn.Module): def __init__( self, dim, mlp_ratio=4., act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, proj_drop=0., drop_path=0., layer_scale_init_value=1e-5, resolution=7, stride=None, use_attn=True, ): super().__init__() if use_attn: self.token_mixer = Attention2d( dim, resolution=resolution, act_layer=act_layer, stride=stride, ) self.ls1 = LayerScale2d( dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity() else: self.token_mixer = None self.ls1 = None self.drop_path1 = None self.mlp = ConvMlpWithNorm( in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, norm_layer=norm_layer, drop=proj_drop, mid_conv=True, ) self.ls2 = LayerScale2d( dim, layer_scale_init_value) if layer_scale_init_value is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() def forward(self, x): if self.token_mixer is not None: x = x + self.drop_path1(self.ls1(self.token_mixer(x))) x = x + self.drop_path2(self.ls2(self.mlp(x))) return x class Stem4(nn.Sequential): def __init__(self, in_chs, out_chs, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d): super().__init__() self.stride = 4 self.conv1 = ConvNormAct( in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer ) self.conv2 = ConvNormAct( out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1, bias=True, norm_layer=norm_layer, act_layer=act_layer ) class EfficientFormerV2Stage(nn.Module): def __init__( self, dim, dim_out, depth, resolution=7, downsample=True, block_stride=None, downsample_use_attn=False, block_use_attn=False, num_vit=1, mlp_ratio=4., proj_drop=.0, drop_path=0., layer_scale_init_value=1e-5, act_layer=nn.GELU, norm_layer=nn.BatchNorm2d, ): super().__init__() self.grad_checkpointing = False mlp_ratio = to_ntuple(depth)(mlp_ratio) resolution = to_2tuple(resolution) if downsample: self.downsample = Downsample( dim, dim_out, use_attn=downsample_use_attn, resolution=resolution, norm_layer=norm_layer, act_layer=act_layer, ) dim = dim_out resolution = tuple([math.ceil(r / 2) for r in resolution]) else: assert dim == dim_out self.downsample = nn.Identity() blocks = [] for block_idx in range(depth): remain_idx = depth - num_vit - 1 b = EfficientFormerV2Block( dim, resolution=resolution, stride=block_stride, mlp_ratio=mlp_ratio[block_idx], use_attn=block_use_attn and block_idx > remain_idx, proj_drop=proj_drop, drop_path=drop_path[block_idx], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer, ) blocks += [b] self.blocks = nn.Sequential(*blocks) def forward(self, x): x = self.downsample(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x) else: x = self.blocks(x) return x class EfficientFormerV2(nn.Module): def __init__( self, depths, in_chans=3, img_size=224, global_pool='avg', embed_dims=None, downsamples=None, mlp_ratios=4, norm_layer='batchnorm2d', norm_eps=1e-5, act_layer='gelu', num_classes=1000, drop_rate=0., proj_drop_rate=0., drop_path_rate=0., layer_scale_init_value=1e-5, num_vit=0, distillation=True, ): super().__init__() assert global_pool in ('avg', '') self.num_classes = num_classes self.global_pool = global_pool self.feature_info = [] img_size = to_2tuple(img_size) norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps) act_layer = get_act_layer(act_layer) self.stem = Stem4(in_chans, embed_dims[0], act_layer=act_layer, norm_layer=norm_layer) prev_dim = embed_dims[0] stride = 4 num_stages = len(depths) dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)] downsamples = downsamples or (False,) + (True,) * (len(depths) - 1) mlp_ratios = to_ntuple(num_stages)(mlp_ratios) stages = [] for i in range(num_stages): curr_resolution = tuple([math.ceil(s / stride) for s in img_size]) stage = EfficientFormerV2Stage( prev_dim, embed_dims[i], depth=depths[i], resolution=curr_resolution, downsample=downsamples[i], block_stride=2 if i == 2 else None, downsample_use_attn=i >= 3, block_use_attn=i >= 2, num_vit=num_vit, mlp_ratio=mlp_ratios[i], proj_drop=proj_drop_rate, drop_path=dpr[i], layer_scale_init_value=layer_scale_init_value, act_layer=act_layer, norm_layer=norm_layer, ) if downsamples[i]: stride *= 2 prev_dim = embed_dims[i] self.feature_info += [dict(num_chs=prev_dim, reduction=stride, 
module=f'stages.{i}')] stages.append(stage) self.stages = nn.Sequential(*stages) # Classifier head self.num_features = self.head_hidden_size = embed_dims[-1] self.norm = norm_layer(embed_dims[-1]) self.head_drop = nn.Dropout(drop_rate) self.head = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() self.dist = distillation if self.dist: self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity() else: self.head_dist = None self.apply(self.init_weights) self.distilled_training = False # init for classification def init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if m.bias is not None: nn.init.constant_(m.bias, 0) @torch.jit.ignore def no_weight_decay(self): return {k for k, _ in self.named_parameters() if 'attention_biases' in k} @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', # stem and embed blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for s in self.stages: s.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head, self.head_dist def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes if global_pool is not None: self.global_pool = global_pool self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() @torch.jit.ignore def set_distilled_training(self, enable=True): self.distilled_training = enable def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] take_indices, max_index = feature_take_indices(len(self.stages), indices) # forward pass x = self.stem(x) last_idx = len(self.stages) - 1 if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.stages else: stages = self.stages[:max_index + 1] for feat_idx, stage in enumerate(stages): x = stage(x) if feat_idx in take_indices: if feat_idx == last_idx: x_inter = self.norm(x) if norm else x intermediates.append(x_inter) else: intermediates.append(x) if intermediates_only: return intermediates if feat_idx == last_idx: x = self.norm(x) return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. 
""" take_indices, max_index = feature_take_indices(len(self.stages), indices) self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_norm: self.norm = nn.Identity() if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): x = self.stem(x) x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): if self.global_pool == 'avg': x = x.mean(dim=(2, 3)) x = self.head_drop(x) if pre_logits: return x x, x_dist = self.head(x), self.head_dist(x) if self.distilled_training and self.training and not torch.jit.is_scripting(): # only return separate classification predictions when training in distilled mode return x, x_dist else: # during standard train/finetune, inference average the classifier predictions return (x + x_dist) / 2 def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True, 'crop_pct': .95, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'classifier': ('head', 'head_dist'), 'first_conv': 'stem.conv1.conv', **kwargs } default_cfgs = generate_default_cfgs({ 'efficientformerv2_s0.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_s1.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_s2.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), 'efficientformerv2_l.snap_dist_in1k': _cfg( hf_hub_id='timm/', ), }) def _create_efficientformerv2(variant, pretrained=False, **kwargs): out_indices = kwargs.pop('out_indices', (0, 1, 2, 3)) model = build_model_with_cfg( EfficientFormerV2, variant, pretrained, feature_cfg=dict(flatten_sequential=True, out_indices=out_indices), **kwargs) return model @register_model def efficientformerv2_s0(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S0'], embed_dims=EfficientFormer_width['S0'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S0'], ) return _create_efficientformerv2('efficientformerv2_s0', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s1(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S1'], embed_dims=EfficientFormer_width['S1'], num_vit=2, drop_path_rate=0.0, mlp_ratios=EfficientFormer_expansion_ratios['S1'], ) return _create_efficientformerv2('efficientformerv2_s1', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_s2(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['S2'], embed_dims=EfficientFormer_width['S2'], num_vit=4, drop_path_rate=0.02, mlp_ratios=EfficientFormer_expansion_ratios['S2'], ) return _create_efficientformerv2('efficientformerv2_s2', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def efficientformerv2_l(pretrained=False, **kwargs) -> EfficientFormerV2: model_args = dict( depths=EfficientFormer_depth['L'], embed_dims=EfficientFormer_width['L'], num_vit=6, drop_path_rate=0.1, mlp_ratios=EfficientFormer_expansion_ratios['L'], ) return _create_efficientformerv2('efficientformerv2_l', pretrained=pretrained, **dict(model_args, **kwargs))
""" An PyTorch implementation of Hiera Adapted for timm from originals at https://github.com/facebookresearch/hiera """ # Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # -------------------------------------------------------- # # Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles # # Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, # Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, # Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer. # # Paper: https://arxiv.org/abs/2306.00989/ # # References: # slowfast: https://github.com/facebookresearch/SlowFast # timm: https://github.com/rwightman/pytorch-image-models/tree/master/timm # -------------------------------------------------------- import math from functools import partial from typing import Dict, List, Optional, Tuple, Type, Union import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import DropPath, Mlp, LayerScale, ClNormMlpClassifierHead, use_fused_attn, \ _assert, get_norm_layer, to_2tuple, init_weight_vit, init_weight_jax from ._registry import generate_default_cfgs, register_model from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._features_fx import register_notrace_function from ._manipulate import named_apply, checkpoint __all__ = ['Hiera'] def conv_nd(n: int) -> Type[nn.Module]: """ Returns a conv with nd (e.g., Conv2d for n=2). Work up to n=3. If you wanted a 4d Hiera, you could probably just implement this for n=4. (no promises) """ return [nn.Identity, nn.Conv1d, nn.Conv2d, nn.Conv3d][n] @register_notrace_function def get_resized_mask(target_size: List[int], mask: torch.Tensor) -> torch.Tensor: # target_size: [(T), (H), W] # (spatial) mask: [B, C, (t), (h), w] if mask is None: return mask _assert(len(mask.shape[2:]) == len(target_size), "mask spatial shape and target_size must match.") if mask.shape[2:] != target_size: return F.interpolate(mask.float(), size=target_size) return mask def undo_windowing( x: torch.Tensor, shape: List[int], mu_shape: List[int], ) -> torch.Tensor: """ Restore spatial organization by undoing windowed organization of mask units. Args: x: organized by mask units windows, e.g. in 2d [B, #MUy*#MUx, MUy, MUx, C] shape: current spatial shape, if it were not organized into mask unit windows, e.g. in 2d [B, #MUy*MUy, #MUx*MUx, C]. mu_shape: current mask unit shape, e.g. in 2d [MUy, MUx] Returns: x: e.g. in 2d, [B, #MUy*MUy, #MUx*MUx, C] """ D = len(shape) B, C = x.shape[0], x.shape[-1] # [B, #MUy*#MUx, MUy, MUx, C] -> [B, #MUy, #MUx, MUy, MUx, C] num_MUs = [s // mu for s, mu in zip(shape, mu_shape)] x = x.view(B, *num_MUs, *mu_shape, C) # [B, #MUy, #MUx, MUy, MUx, C] -> [B, #MUy*MUy, #MUx*MUx, C] permute = ( [0] + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D, 1 + 2 * D))], []) + [len(x.shape) - 1] ) x = x.permute(permute).reshape(B, *shape, C) return x class Unroll(nn.Module): """ Reorders the tokens such that patches are contiguous in memory. E.g., given [B, (H, W), C] and stride of (Sy, Sx), this will re-order the tokens as [B, (Sy, Sx, H // Sy, W // Sx), C] This allows operations like Max2d to be computed as x.view(B, Sx*Sy, -1, C).max(dim=1). 
Not only is this faster, but it also makes it easy to support inputs of arbitrary dimensions in addition to patch-wise sparsity. Performing this operation multiple times in sequence puts entire windows as contiguous in memory. For instance, if you applied the stride (2, 2) 3 times, entire windows of size 8x8 would be contiguous in memory, allowing operations like mask unit attention computed easily and efficiently, while also allowing max to be applied sequentially. Note: This means that intermediate values of the model are not in HxW order, so they need to be re-rolled if you want to use the intermediate values as a HxW feature map. The last block of the network is fine though, since by then the strides are all consumed. """ def __init__( self, input_size: Tuple[int, ...], patch_stride: Tuple[int, ...], unroll_schedule: List[Tuple[int, ...]], ): super().__init__() self.size = [i // s for i, s in zip(input_size, patch_stride)] self.schedule = unroll_schedule def forward(self, x: torch.Tensor) -> torch.Tensor: """ Input: Flattened patch embeddings [B, N, C] Output: Patch embeddings [B, N, C] permuted such that [B, 4, N//4, C].max(1) etc. performs MaxPoolNd """ B, _, C = x.shape cur_size = self.size x = x.view(*([B] + cur_size + [C])) for strides in self.schedule: # Move patches with the given strides to the batch dimension # Create a view of the tensor with the patch stride as separate dims # For example in 2d: [B, H // Sy, Sy, W // Sx, Sx, C] cur_size = [i // s for i, s in zip(cur_size, strides)] new_shape = [B] + sum([[i, s] for i, s in zip(cur_size, strides)], []) + [C] x = x.view(new_shape) # Move the patch stride into the batch dimension # For example in 2d: [B, Sy, Sx, H // Sy, W // Sx, C] L = len(new_shape) permute = [0] + list(range(2, L - 1, 2)) + list(range(1, L - 1, 2)) + [L - 1] x = x.permute(permute) # Now finally flatten the relevant dims into the batch dimension x = x.flatten(0, len(strides)) B *= math.prod(strides) x = x.reshape(-1, math.prod(self.size), C) return x class Reroll(nn.Module): """ Undos the "unroll" operation so that you can use intermediate features. """ def __init__( self, input_size: Tuple[int, ...], patch_stride: Tuple[int, ...], unroll_schedule: List[Tuple[int, ...]], stage_ends: List[int], q_pool: int, ): super().__init__() self.size = [i // s for i, s in zip(input_size, patch_stride)] # The first stage has to reverse everything # The next stage has to reverse all but the first unroll, etc. self.schedule = {} size = self.size for i in range(stage_ends[-1] + 1): self.schedule[i] = unroll_schedule, size # schedule unchanged if no pooling at a stage end if i in stage_ends[:q_pool]: if len(unroll_schedule) > 0: size = [n // s for n, s in zip(size, unroll_schedule[0])] unroll_schedule = unroll_schedule[1:] def forward( self, x: torch.Tensor, block_idx: int, mask: torch.Tensor = None ) -> torch.Tensor: """ Roll the given tensor back up to spatial order assuming it's from the given block. If no mask is provided: - Returns [B, H, W, C] for 2d, [B, T, H, W, C] for 3d, etc. If a mask is provided: - Returns [B, #MUs, MUy, MUx, C] for 2d, etc. 
""" schedule, size = self.schedule[block_idx] B, N, C = x.shape D = len(size) cur_mu_shape = [1] * D for strides in schedule: # Extract the current patch from N x = x.view(B, *strides, N // math.prod(strides), *cur_mu_shape, C) # Move that patch into the current MU # Example in 2d: [B, Sy, Sx, N//(Sy*Sx), MUy, MUx, C] -> [B, N//(Sy*Sx), Sy, MUy, Sx, MUx, C] L = len(x.shape) permute = ( [0, 1 + D] + sum([list(p) for p in zip(range(1, 1 + D), range(1 + D + 1, L - 1))], []) + [L - 1] ) x = x.permute(permute) # Reshape to [B, N//(Sy*Sx), *MU, C] for i in range(D): cur_mu_shape[i] *= strides[i] x = x.reshape(B, -1, *cur_mu_shape, C) N = x.shape[1] # Current shape (e.g., 2d: [B, #MUy*#MUx, MUy, MUx, C]) x = x.view(B, N, *cur_mu_shape, C) # If masked, return [B, #MUs, MUy, MUx, C] if mask is not None: return x # If not masked, we can return [B, H, W, C] x = undo_windowing(x, size, cur_mu_shape) return x class MaskUnitAttention(nn.Module): """ Computes either Mask Unit or Global Attention. Also is able to perform q pooling. Note: this assumes the tokens have already been flattened and unrolled into mask units. See `Unroll` for more details. """ fused_attn: torch.jit.Final[bool] def __init__( self, dim: int, dim_out: int, heads: int, q_stride: int = 1, window_size: int = 0, use_mask_unit_attn: bool = False, ): """ Args: - dim, dim_out: The input and output feature dimensions. - heads: The number of attention heads. - q_stride: If greater than 1, pool q with this stride. The stride should be flattened (e.g., 2x2 = 4). - window_size: The current (flattened) size of a mask unit *after* pooling (if any). - use_mask_unit_attn: Use Mask Unit or Global Attention. """ super().__init__() self.dim = dim self.dim_out = dim_out self.heads = heads self.q_stride = q_stride self.head_dim = dim_out // heads self.scale = self.head_dim ** -0.5 self.fused_attn = use_fused_attn() self.qkv = nn.Linear(dim, 3 * dim_out) self.proj = nn.Linear(dim_out, dim_out) self.window_size = window_size self.use_mask_unit_attn = use_mask_unit_attn def forward(self, x: torch.Tensor) -> torch.Tensor: """ Input should be of shape [batch, tokens, channels]. """ B, N, _ = x.shape num_windows = (N // (self.q_stride * self.window_size)) if self.use_mask_unit_attn else 1 qkv = self.qkv(x).reshape(B, -1, num_windows, 3, self.heads, self.head_dim).permute(3, 0, 4, 2, 1, 5) q, k, v = qkv.unbind(0) if self.q_stride > 1: # Refer to Unroll to see how this performs a maxpool-Nd q = q.view(B, self.heads, num_windows, self.q_stride, -1, self.head_dim).amax(dim=3) if self.fused_attn: # Note: the original paper did *not* use SDPA, it's a free boost! 
x = F.scaled_dot_product_attention(q, k, v) else: attn = (q * self.scale) @ k.transpose(-1, -2) attn = attn.softmax(dim=-1) x = attn @ v x = x.transpose(1, 3).reshape(B, -1, self.dim_out) x = self.proj(x) return x class HieraBlock(nn.Module): def __init__( self, dim: int, dim_out: int, heads: int, mlp_ratio: float = 4.0, drop_path: float = 0.0, init_values: Optional[float] = None, norm_layer: Type[nn.Module] = nn.LayerNorm, act_layer: Type[nn.Module] = nn.GELU, q_stride: int = 1, window_size: int = 0, use_expand_proj: bool = True, use_mask_unit_attn: bool = False, ): super().__init__() self.dim = dim self.dim_out = dim_out self.norm1 = norm_layer(dim) if dim != dim_out: self.do_expand = True if use_expand_proj: self.proj = nn.Linear(dim, dim_out) else: assert dim_out == dim * 2 self.proj = None else: self.do_expand = False self.proj = None self.attn = MaskUnitAttention( dim, dim_out, heads, q_stride, window_size, use_mask_unit_attn ) self.ls1 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity() self.drop_path1 = DropPath(drop_path) if drop_path > 0 else nn.Identity() self.norm2 = norm_layer(dim_out) self.mlp = Mlp(dim_out, int(dim_out * mlp_ratio), act_layer=act_layer) self.ls2 = LayerScale(dim_out, init_values=init_values) if init_values is not None else nn.Identity() self.drop_path2 = DropPath(drop_path) if drop_path > 0 else nn.Identity() def forward(self, x: torch.Tensor) -> torch.Tensor: # Attention + Q Pooling x_norm = self.norm1(x) if self.do_expand: if self.proj is not None: x = self.proj(x_norm) x = x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1) # max-pool else: x = torch.cat([ x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).amax(dim=1), # max-pool x.view(x.shape[0], self.attn.q_stride, -1, x.shape[-1]).mean(dim=1), # avg-pool ], dim=-1, ) x = x + self.drop_path1(self.ls1(self.attn(x_norm))) # MLP x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x)))) return x class PatchEmbed(nn.Module): """Patch embed that supports any number of spatial dimensions (1d, 2d, 3d).""" def __init__( self, dim_in: int, dim_out: int, kernel: Tuple[int, ...], stride: Tuple[int, ...], padding: Tuple[int, ...], reshape: bool = True, ): super().__init__() # Support any number of spatial dimensions self.spatial_dims = len(kernel) self.reshape = reshape self.proj = conv_nd(self.spatial_dims)( dim_in, dim_out, kernel_size=kernel, stride=stride, padding=padding, ) def forward( self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: if mask is not None: mask = get_resized_mask(target_size=x.shape[2:], mask=mask) x = self.proj(x * mask.to(torch.bool)) else: x = self.proj(x) if self.reshape: x = x.reshape(x.shape[0], x.shape[1], -1).transpose(2, 1) return x class Hiera(nn.Module): def __init__( self, img_size: Tuple[int, ...] = (224, 224), in_chans: int = 3, embed_dim: int = 96, # initial embed dim num_heads: int = 1, # initial number of heads num_classes: int = 1000, global_pool: str = 'avg', stages: Tuple[int, ...] = (2, 3, 16, 3), q_pool: int = 3, # number of q_pool stages q_stride: Tuple[int, ...] = (2, 2), mask_unit_size: Tuple[int, ...] = (8, 8), # must divide q_stride ** (#stages-1) # mask_unit_attn: which stages use mask unit attention? mask_unit_attn: Tuple[bool, ...] = (True, True, False, False), use_expand_proj: bool = True, dim_mul: float = 2.0, head_mul: float = 2.0, patch_kernel: Tuple[int, ...] = (7, 7), patch_stride: Tuple[int, ...] = (4, 4), patch_padding: Tuple[int, ...] 
= (3, 3), mlp_ratio: float = 4.0, drop_path_rate: float = 0.0, init_values: Optional[float] = None, fix_init: bool = True, weight_init: str = '', norm_layer: Union[str, nn.Module] = "LayerNorm", drop_rate: float = 0.0, patch_drop_rate: float = 0.0, head_init_scale: float = 0.001, sep_pos_embed: bool = False, abs_win_pos_embed: bool = False, global_pos_size: Tuple[int, int] = (14, 14), ): super().__init__() self.num_classes = num_classes self.grad_checkpointing = False norm_layer = get_norm_layer(norm_layer) if isinstance(img_size, int): img_size = to_2tuple(img_size) self.patch_stride = patch_stride self.tokens_spatial_shape = [i // s for i, s in zip(img_size, patch_stride)] num_tokens = math.prod(self.tokens_spatial_shape) flat_mu_size = math.prod(mask_unit_size) flat_q_stride = math.prod(q_stride) assert q_pool < len(stages) self.q_pool, self.q_stride = q_pool, q_stride self.mu_size, self.mask_unit_size = flat_mu_size, mask_unit_size self.mask_spatial_shape = [i // s for i, s in zip(self.tokens_spatial_shape, self.mask_unit_size)] self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)] self.patch_drop_rate = patch_drop_rate self.patch_embed = PatchEmbed( in_chans, embed_dim, patch_kernel, patch_stride, patch_padding, ) self.pos_embed: Optional[nn.Parameter] = None self.pos_embed_win: Optional[nn.Parameter] = None self.pos_embed_spatial: Optional[nn.Parameter] = None self.pos_embed_temporal: Optional[nn.Parameter] = None if sep_pos_embed: self.pos_embed_spatial = nn.Parameter( torch.zeros(1, self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], embed_dim) ) self.pos_embed_temporal = nn.Parameter( torch.zeros(1, self.tokens_spatial_shape[0], embed_dim) ) else: if abs_win_pos_embed: # absolute win, params NCHW to make tile & interpolate more natural before add & reshape self.pos_embed = nn.Parameter(torch.zeros(1, embed_dim, *global_pos_size)) self.pos_embed_win = nn.Parameter(torch.zeros(1, embed_dim, *mask_unit_size)) else: self.pos_embed = nn.Parameter(torch.zeros(1, num_tokens, embed_dim)) # Setup roll and reroll modules self.unroll = Unroll( img_size, patch_stride, [q_stride] * len(self.stage_ends[:-1]) ) self.reroll = Reroll( img_size, patch_stride, [q_stride] * len(self.stage_ends[:-1]), self.stage_ends, q_pool, ) # q_pool locations q_pool_blocks = [x + 1 for x in self.stage_ends[:q_pool]] # Transformer blocks cur_stage = 0 depth = sum(stages) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList() self.feature_info = [] for i in range(depth): dim_out = embed_dim # Mask unit or global attention. 
# Lag by 1 block, so that global attention, # applied post pooling on lower resolution use_mask_unit_attn = mask_unit_attn[cur_stage] if i - 1 in self.stage_ends: dim_out = int(embed_dim * dim_mul) num_heads = int(num_heads * head_mul) cur_stage += 1 if i in q_pool_blocks: flat_mu_size //= flat_q_stride block = HieraBlock( dim=embed_dim, dim_out=dim_out, heads=num_heads, mlp_ratio=mlp_ratio, drop_path=dpr[i], init_values=init_values, norm_layer=norm_layer, q_stride=(flat_q_stride if i in q_pool_blocks else 1), window_size=flat_mu_size, use_expand_proj=use_expand_proj, use_mask_unit_attn=use_mask_unit_attn, ) embed_dim = dim_out if i in self.stage_ends: self.feature_info += [ dict(num_chs=dim_out, reduction=2**(cur_stage+2), module=f'blocks.{self.stage_ends[cur_stage]}')] self.blocks.append(block) self.num_features = self.head_hidden_size = embed_dim self.head = ClNormMlpClassifierHead( embed_dim, num_classes, pool_type=global_pool, drop_rate=drop_rate, norm_layer=norm_layer, input_fmt='NLC', ) # Initialize everything if sep_pos_embed: nn.init.trunc_normal_(self.pos_embed_spatial, std=0.02) nn.init.trunc_normal_(self.pos_embed_temporal, std=0.02) else: if self.pos_embed is not None: nn.init.trunc_normal_(self.pos_embed, std=0.02) if self.pos_embed_win is not None: nn.init.trunc_normal_(self.pos_embed_win, std=0.02) if weight_init != 'skip': init_fn = init_weight_jax if weight_init == 'jax' else init_weight_vit init_fn = partial(init_fn, classifier_name='head.fc') named_apply(init_fn, self) if fix_init: self.fix_init_weight() if isinstance(self.head.fc, nn.Linear): self.head.fc.weight.data.mul_(head_init_scale) self.head.fc.bias.data.mul_(head_init_scale) def fix_init_weight(self): def rescale(param, _layer_id): param.div_(math.sqrt(2.0 * _layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) @torch.jit.ignore def no_weight_decay(self): if self.pos_embed is not None: return ["pos_embed"] elif self.pos_embed_abs is not None: return ['pos_embed_abs', 'pos_embed_win'] else: return ["pos_embed_spatial", "pos_embed_temporal"] @torch.jit.ignore def group_matcher(self, coarse: bool = False) -> Dict: return dict( stem=r'^pos_embed|pos_embed_spatial|pos_embed_temporal|pos_embed_abs|pos_embed_win|patch_embed', blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))] ) @torch.jit.ignore def set_grad_checkpointing(self, enable: bool = True) -> None: self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None, reset_other: bool = False): self.num_classes = num_classes self.head.reset(num_classes, global_pool, reset_other=reset_other) def get_random_mask(self, x: torch.Tensor, mask_ratio: float) -> torch.Tensor: """ Generates a random mask, mask_ratio fraction are dropped. 1 is *keep*, 0 is *remove*. Useful for MAE, FLIP, etc. 
""" B = x.shape[0] # Tokens selected for masking at mask unit level num_windows = math.prod(self.mask_spatial_shape) # num_mask_units len_keep = int(num_windows * (1 - mask_ratio)) noise = torch.rand(B, num_windows, device=x.device) # Sort noise for each sample ids_shuffle = torch.argsort(noise, dim=1) # ascend: small is keep, large is remove ids_restore = torch.argsort(ids_shuffle, dim=1) # Generate the binary mask: 1 is *keep*, 0 is *remove* # Note this is opposite to original MAE mask = torch.zeros([B, num_windows], device=x.device) mask[:, :len_keep] = 1 # Unshuffle to get the binary mask mask = torch.gather(mask, dim=1, index=ids_restore) return mask.bool() def _pos_embed(self, x) -> torch.Tensor: if self.pos_embed_win is not None: # absolute win position embedding, from # Window Attention is Bugged: How not to Interpolate Position Embeddings (https://arxiv.org/abs/2311.05613) pos_embed_win = self.pos_embed_win.tile(self.mask_spatial_shape) pos_embed = F.interpolate( self.pos_embed, size=pos_embed_win.shape[-2:], mode='bicubic', antialias=True, ) pos_embed = pos_embed + pos_embed_win pos_embed = pos_embed.flatten(2).transpose(1, 2) elif self.pos_embed is not None: pos_embed = self.pos_embed else: pos_embed = ( self.pos_embed_spatial.repeat(1, self.tokens_spatial_shape[0], 1) + torch.repeat_interleave( self.pos_embed_temporal, self.tokens_spatial_shape[1] * self.tokens_spatial_shape[2], dim=1, ) ) x = x + pos_embed return x def forward_intermediates( self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = True, output_fmt: str = 'NCHW', intermediates_only: bool = False, coarse: bool = True, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to all intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert not norm, 'normalization of features not supported' assert output_fmt in ('NCHW', 'NHWC'), 'Output format must be one of NCHW, NHWC.' 
if coarse: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) take_indices = [self.stage_ends[i] for i in take_indices] max_index = self.stage_ends[max_index] else: take_indices, max_index = feature_take_indices(len(self.blocks), indices) if mask is not None: patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape else: patch_mask = None x = self.patch_embed(x, mask=patch_mask) x = self._pos_embed(x) x = self.unroll(x) # Discard masked tokens if mask is not None: x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1]) intermediates = [] if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript blocks = self.blocks else: blocks = self.blocks[:max_index + 1] for i, blk in enumerate(blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) if i in take_indices: x_int = self.reroll(x, i, mask=mask) intermediates.append(x_int.permute(0, 3, 1, 2) if output_fmt == 'NCHW' else x_int) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, coarse: bool = True, ): """ Prune layers not required for specified intermediates. """ if coarse: take_indices, max_index = feature_take_indices(len(self.stage_ends), indices) max_index = self.stage_ends[max_index] else: take_indices, max_index = feature_take_indices(len(self.blocks), indices) self.blocks = self.blocks[:max_index + 1] # truncate blocks if prune_head: self.head.reset(0, reset_other=True) return take_indices def forward_features( self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, return_intermediates: bool = False, ) -> torch.Tensor: """ mask should be a boolean tensor of shape [B, #MUt*#MUy*#MUx] where #MU are the number of mask units in that dim. Note: 1 in mask is *keep*, 0 is *remove*; mask.sum(dim=-1) should be the same across the batch. """ if self.training and self.patch_drop_rate > 0: # using mask for something like 'patch dropout' via mask-units in supervised train / fine-tune assert mask is None mask = self.get_random_mask(x, mask_ratio=self.patch_drop_rate) if mask is not None: patch_mask = mask.view(x.shape[0], 1, *self.mask_spatial_shape) # B, C, *mask_spatial_shape else: patch_mask = None x = self.patch_embed(x, mask=patch_mask) x = self._pos_embed(x) x = self.unroll(x) # Discard masked tokens if mask is not None: x = x[mask[..., None].tile(1, self.mu_size, x.shape[2])].view(x.shape[0], -1, x.shape[-1]) intermediates = [] for i, blk in enumerate(self.blocks): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(blk, x) else: x = blk(x) if return_intermediates and i in self.stage_ends: intermediates.append(self.reroll(x, i, mask=mask)) # x may not always be in spatial order here. # e.g. 
if q_pool = 2, mask_unit_size = (8, 8), and # q_stride = (2, 2), not all unrolls were consumed, # intermediates[-1] is x in spatial order if return_intermediates: return x, intermediates return x def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor: x = self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) return x def forward( self, x: torch.Tensor, mask: Optional[torch.Tensor] = None, ) -> torch.Tensor: x = self.forward_features(x, mask=mask) if mask is None: x = self.forward_head(x) return x def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'patch_embed.proj', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ "hiera_tiny_224.mae_in1k_ft_in1k": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', ), "hiera_tiny_224.mae": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0, ), "hiera_small_224.mae_in1k_ft_in1k": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', ), "hiera_small_224.mae": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0, ), "hiera_base_224.mae_in1k_ft_in1k": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', ), "hiera_base_224.mae": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0, ), "hiera_base_plus_224.mae_in1k_ft_in1k": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', ), "hiera_base_plus_224.mae": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0, ), "hiera_large_224.mae_in1k_ft_in1k": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', ), "hiera_large_224.mae": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0, ), "hiera_huge_224.mae_in1k_ft_in1k": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', ), "hiera_huge_224.mae": _cfg( hf_hub_id='timm/', license='cc-by-nc-4.0', num_classes=0, ), "hiera_small_abswin_256.sbb2_e200_in12k_ft_in1k": _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, ), "hiera_small_abswin_256.sbb2_pd_e200_in12k_ft_in1k": _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, ), "hiera_small_abswin_256.sbb2_e200_in12k": _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95, ), "hiera_small_abswin_256.sbb2_pd_e200_in12k": _cfg( hf_hub_id='timm/', num_classes=11821, input_size=(3, 256, 256), crop_pct=0.95, ), "hiera_base_abswin_256.untrained": _cfg( # hf_hub_id='timm/', input_size=(3, 256, 256), crop_pct=0.95, ), }) def checkpoint_filter_fn(state_dict, model=None): state_dict = state_dict.get('model_state', state_dict) output = {} for k, v in state_dict.items(): # if k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]: # # To resize pos embedding when using model at different size from pretrained weights # from timm.layers import resample_abs_pos_embed # v = resample_abs_pos_embed( # v, # new_size=(64, 64), # num_prefix_tokens=0, # verbose=True, # ) if 'head.projection.' 
in k: k = k.replace('head.projection.', 'head.fc.') if k.startswith('encoder_norm.'): k = k.replace('encoder_norm.', 'head.norm.') elif k.startswith('norm.'): k = k.replace('norm.', 'head.norm.') if k == 'pos_embed_abs': k = 'pos_embed' output[k] = v return output def _create_hiera(variant: str, pretrained: bool = False, **kwargs) -> Hiera: out_indices = kwargs.pop('out_indices', 4) return build_model_with_cfg( Hiera, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=out_indices, feature_cls='getter'), **kwargs, ) @register_model def hiera_tiny_224(pretrained=False, **kwargs): model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 7, 2)) return _create_hiera('hiera_tiny_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_small_224(pretrained=False, **kwargs): model_args = dict(embed_dim=96, num_heads=1, stages=(1, 2, 11, 2)) return _create_hiera('hiera_small_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_base_224(pretrained=False, **kwargs): model_args = dict(embed_dim=96, num_heads=1, stages=(2, 3, 16, 3)) return _create_hiera('hiera_base_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_base_plus_224(pretrained=False, **kwargs): model_args = dict(embed_dim=112, num_heads=2, stages=(2, 3, 16, 3)) return _create_hiera('hiera_base_plus_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_large_224(pretrained=False, **kwargs): model_args = dict(embed_dim=144, num_heads=2, stages=(2, 6, 36, 4)) return _create_hiera('hiera_large_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_huge_224(pretrained=False, **kwargs): model_args = dict(embed_dim=256, num_heads=4, stages=(2, 6, 36, 4)) return _create_hiera('hiera_huge_224', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_small_abswin_256(pretrained=False, **kwargs): model_args = dict( embed_dim=96, num_heads=1, stages=(1, 2, 11, 2), abs_win_pos_embed=True, global_pos_size=(16, 16), init_values=1e-5, weight_init='jax', use_expand_proj=False, ) return _create_hiera('hiera_small_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def hiera_base_abswin_256(pretrained=False, **kwargs): model_args = dict( embed_dim=96, num_heads=1, stages=(2, 3, 16, 3), abs_win_pos_embed=True, init_values=1e-5, weight_init='jax') return _create_hiera('hiera_base_abswin_256', pretrained=pretrained, **dict(model_args, **kwargs))
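# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream timm file).
# A minimal, hedged example of the plain and masked (MAE-style) forward paths
# defined above. Assumes an installed timm package so the relative imports
# resolve (run as `python -m timm.models.hiera`). With mask_ratio=0.6 only
# 40% of the 8x8-token mask units are kept, and the masked call returns
# unpooled token features because the head is skipped when a mask is given.
if __name__ == '__main__':
    model = hiera_tiny_224(pretrained=False).eval()
    dummy = torch.randn(2, 3, 224, 224)
    with torch.no_grad():
        logits = model(dummy)  # torch.Size([2, 1000])
        mask = model.get_random_mask(dummy, mask_ratio=0.6)  # [B, #mask_units], bool
        tokens = model(dummy, mask=mask)  # [B, kept_tokens, C], no pooling/head
    print(logits.shape, mask.shape, tokens.shape)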
""" MobileViT Paper: V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178 V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680 MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below) License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source) Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman """ # # For licensing see accompanying LICENSE file. # Copyright (C) 2020 Apple Inc. All Rights Reserved. # import math from typing import Callable, Tuple, Optional import torch import torch.nn.functional as F from torch import nn from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups from .vision_transformer import Block as TransformerBlock __all__ = [] def _inverted_residual_block(d, c, s, br=4.0): # inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise) return ByoBlockCfg( type='bottle', d=d, c=c, s=s, gs=1, br=br, block_kwargs=dict(bottle_in=True, linear_out=True)) def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0): # inverted residual + mobilevit blocks as per MobileViT network return ( _inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg( type='mobilevit', d=1, c=c, s=1, block_kwargs=dict( transformer_dim=transformer_dim, transformer_depth=transformer_depth, patch_size=patch_size) ) ) def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5): # inverted residual + mobilevit blocks as per MobileViT network return ( _inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg( type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1, block_kwargs=dict( transformer_depth=transformer_depth, patch_size=patch_size) ) ) def _mobilevitv2_cfg(multiplier=1.0): chs = (64, 128, 256, 384, 512) if multiplier != 1.0: chs = tuple([int(c * multiplier) for c in chs]) cfg = ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=chs[0], s=1, br=2.0), _inverted_residual_block(d=2, c=chs[1], s=2, br=2.0), _mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2), _mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4), _mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3), ), stem_chs=int(32 * multiplier), stem_type='3x3', stem_pool='', downsample='', act_layer='silu', ) return cfg model_cfgs = dict( mobilevit_xxs=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=16, s=1, br=2.0), _inverted_residual_block(d=3, c=24, s=2, br=2.0), _mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0), _mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0), _mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=320, ), mobilevit_xs=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=48, s=2), _mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2), 
_mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=384, ), mobilevit_s=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=640, ), semobilevit_s=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', attn_layer='se', attn_kwargs=dict(rd_ratio=1/8), num_features=640, ), mobilevitv2_050=_mobilevitv2_cfg(.50), mobilevitv2_075=_mobilevitv2_cfg(.75), mobilevitv2_125=_mobilevitv2_cfg(1.25), mobilevitv2_100=_mobilevitv2_cfg(1.0), mobilevitv2_150=_mobilevitv2_cfg(1.5), mobilevitv2_175=_mobilevitv2_cfg(1.75), mobilevitv2_200=_mobilevitv2_cfg(2.0), ) @register_notrace_module class MobileVitBlock(nn.Module): """ MobileViT block Paper: https://arxiv.org/abs/2110.02178?context=cs.LG """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 3, stride: int = 1, bottle_ratio: float = 1.0, group_size: Optional[int] = None, dilation: Tuple[int, int] = (1, 1), mlp_ratio: float = 2.0, transformer_dim: Optional[int] = None, transformer_depth: int = 2, patch_size: int = 8, num_heads: int = 4, attn_drop: float = 0., drop: int = 0., no_fusion: bool = False, drop_path_rate: float = 0., layers: LayerFn = None, transformer_norm_layer: Callable = nn.LayerNorm, **kwargs, # eat unused args ): super(MobileVitBlock, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act( in_chs, in_chs, kernel_size=kernel_size, stride=stride, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[ TransformerBlock( transformer_dim, mlp_ratio=mlp_ratio, num_heads=num_heads, qkv_bias=True, attn_drop=attn_drop, proj_drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer, ) for _ in range(transformer_depth) ]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1) if no_fusion: self.conv_fusion = None else: self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x # Local representation x = self.conv_kxk(x) x = self.conv_1x1(x) # Unfold (feature map -> patches) patch_h, patch_w = self.patch_size B, C, H, W = x.shape 
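        # The map is resized (if needed) so H and W divide evenly by the patch
        # size, then unfolded to [B * P, N, C] (P = pixels per patch, N = number
        # of patches) so the transformer below mixes information across patches
        # at each fixed pixel position.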
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w num_patches = num_patch_h * num_patch_w # N interpolate = False if new_h != H or new_w != W: # Note: Padding can be done, but then it needs to be handled in attention function. x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False) interpolate = True # [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w] x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2) # [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1) # Global representations x = self.transformer(x) x = self.norm(x) # Fold (patch -> feature map) # [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w] x = x.contiguous().view(B, self.patch_area, num_patches, -1) x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w) # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) if interpolate: x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False) x = self.conv_proj(x) if self.conv_fusion is not None: x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) return x class LinearSelfAttention(nn.Module): """ This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680` This layer can be used for self- as well as cross-attention. Args: embed_dim (int): :math:`C` from an expected input of size :math:`(N, C, H, W)` attn_drop (float): Dropout value for context scores. Default: 0.0 bias (bool): Use bias in learnable layers. Default: True Shape: - Input: :math:`(N, C, P, N)` where :math:`N` is the batch size, :math:`C` is the input channels, :math:`P` is the number of pixels in the patch, and :math:`N` is the number of patches - Output: same as the input .. note:: For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor, we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be expensive on resource-constrained devices) that may be required to convert the unfolded tensor from channel-first to channel-last format in case of a linear layer. 
""" def __init__( self, embed_dim: int, attn_drop: float = 0.0, proj_drop: float = 0.0, bias: bool = True, ) -> None: super().__init__() self.embed_dim = embed_dim self.qkv_proj = nn.Conv2d( in_channels=embed_dim, out_channels=1 + (2 * embed_dim), bias=bias, kernel_size=1, ) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Conv2d( in_channels=embed_dim, out_channels=embed_dim, bias=bias, kernel_size=1, ) self.out_drop = nn.Dropout(proj_drop) def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor: # [B, C, P, N] --> [B, h + 2d, P, N] qkv = self.qkv_proj(x) # Project x into query, key and value # Query --> [B, 1, P, N] # value, key --> [B, d, P, N] query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1) # apply softmax along N dimension context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) # Compute context vector # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1] context_vector = (key * context_scores).sum(dim=-1, keepdim=True) # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out @torch.jit.ignore() def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: # x --> [B, C, P, N] # x_prev = [B, C, P, M] batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape q_patch_area, q_num_patches = x.shape[-2:] assert ( kv_patch_area == q_patch_area ), "The number of pixels in a patch for query and key_value should be the same" # compute query, key, and value # [B, C, P, M] --> [B, 1 + d, P, M] qk = F.conv2d( x_prev, weight=self.qkv_proj.weight[:self.embed_dim + 1], bias=self.qkv_proj.bias[:self.embed_dim + 1], ) # [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M] query, key = qk.split([1, self.embed_dim], dim=1) # [B, C, P, N] --> [B, d, P, N] value = F.conv2d( x, weight=self.qkv_proj.weight[self.embed_dim + 1], bias=self.qkv_proj.bias[self.embed_dim + 1] if self.qkv_proj.bias is not None else None, ) # apply softmax along M dimension context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) # compute context vector # [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1] context_vector = (key * context_scores).sum(dim=-1, keepdim=True) # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: if x_prev is None: return self._forward_self_attn(x) else: return self._forward_cross_attn(x, x_prev=x_prev) class LinearTransformerBlock(nn.Module): """ This class defines the pre-norm transformer encoder with linear self-attention in `MobileViTv2 paper <>`_ Args: embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)` mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim drop (float): Dropout rate. Default: 0.0 attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0 drop_path (float): Stochastic depth rate Default: 0.0 norm_layer (Callable): Normalization layer. 
Default: layer_norm_2d Shape: - Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim, :math:`P` is number of pixels in a patch, and :math:`N` is number of patches, - Output: same shape as the input """ def __init__( self, embed_dim: int, mlp_ratio: float = 2.0, drop: float = 0.0, attn_drop: float = 0.0, drop_path: float = 0.0, act_layer=None, norm_layer=None, ) -> None: super().__init__() act_layer = act_layer or nn.SiLU norm_layer = norm_layer or GroupNorm1 self.norm1 = norm_layer(embed_dim) self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop) self.drop_path1 = DropPath(drop_path) self.norm2 = norm_layer(embed_dim) self.mlp = ConvMlp( in_features=embed_dim, hidden_features=int(embed_dim * mlp_ratio), act_layer=act_layer, drop=drop) self.drop_path2 = DropPath(drop_path) def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: if x_prev is None: # self-attention x = x + self.drop_path1(self.attn(self.norm1(x))) else: # cross-attention res = x x = self.norm1(x) # norm x = self.attn(x, x_prev) # attn x = self.drop_path1(x) + res # residual # Feed forward network x = x + self.drop_path2(self.mlp(self.norm2(x))) return x @register_notrace_module class MobileVitV2Block(nn.Module): """ This class defines the `MobileViTv2 block <>`_ """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 3, bottle_ratio: float = 1.0, group_size: Optional[int] = 1, dilation: Tuple[int, int] = (1, 1), mlp_ratio: float = 2.0, transformer_dim: Optional[int] = None, transformer_depth: int = 2, patch_size: int = 8, attn_drop: float = 0., drop: int = 0., drop_path_rate: float = 0., layers: LayerFn = None, transformer_norm_layer: Callable = GroupNorm1, **kwargs, # eat unused args ): super(MobileVitV2Block, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act( in_chs, in_chs, kernel_size=kernel_size, stride=1, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[ LinearTransformerBlock( transformer_dim, mlp_ratio=mlp_ratio, attn_drop=attn_drop, drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer ) for _ in range(transformer_depth) ]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] self.coreml_exportable = is_exportable() def forward(self, x: torch.Tensor) -> torch.Tensor: B, C, H, W = x.shape patch_h, patch_w = self.patch_size new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w num_patches = num_patch_h * num_patch_w # N if new_h != H or new_w != W: x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True) # Local representation x = self.conv_kxk(x) x = self.conv_1x1(x) # Unfold (feature map -> patches), [B, C, H, W] -> [B, C, P, N] C = x.shape[1] if self.coreml_exportable: x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w)) else: x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4) x 
= x.reshape(B, C, -1, num_patches) # Global representations x = self.transformer(x) x = self.norm(x) # Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W] if self.coreml_exportable: # adopted from https://github.com/apple/ml-cvnets/blob/main/cvnets/modules/mobilevit_block.py#L609-L624 x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w) x = F.pixel_shuffle(x, upscale_factor=patch_h) else: x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3) x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) x = self.conv_proj(x) return x register_block('mobilevit', MobileVitBlock) register_block('mobilevit2', MobileVitV2Block) def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'fixed_input_size': False, **kwargs } default_cfgs = generate_default_cfgs({ 'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevitv2_050.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_075.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_100.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_125.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), }) @register_model def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs) @register_model def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs) @register_model def mobilevit_s(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet: return 
_create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k', 'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k', 'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k', 'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384', 'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384', 'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384', })
pytorch-image-models/timm/models/mobilevit.py/0
{ "file_path": "pytorch-image-models/timm/models/mobilevit.py", "repo_id": "pytorch-image-models", "token_count": 12812 }
272
""" ResNeSt Models Paper: `ResNeSt: Split-Attention Networks` - https://arxiv.org/abs/2004.08955 Adapted from original PyTorch impl w/ weights at https://github.com/zhanghang1989/ResNeSt by Hang Zhang Modified for torchscript compat, and consistency with timm by Ross Wightman """ from torch import nn from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SplitAttn from ._builder import build_model_with_cfg from ._registry import register_model, generate_default_cfgs from .resnet import ResNet class ResNestBottleneck(nn.Module): """ResNet Bottleneck """ # pylint: disable=unused-argument expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, radix=1, cardinality=1, base_width=64, avd=False, avd_first=False, is_first=False, reduce_first=1, dilation=1, first_dilation=None, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, attn_layer=None, aa_layer=None, drop_block=None, drop_path=None, ): super(ResNestBottleneck, self).__init__() assert reduce_first == 1 # not supported assert attn_layer is None # not supported assert aa_layer is None # TODO not yet supported assert drop_path is None # TODO not yet supported group_width = int(planes * (base_width / 64.)) * cardinality first_dilation = first_dilation or dilation if avd and (stride > 1 or is_first): avd_stride = stride stride = 1 else: avd_stride = 0 self.radix = radix self.conv1 = nn.Conv2d(inplanes, group_width, kernel_size=1, bias=False) self.bn1 = norm_layer(group_width) self.act1 = act_layer(inplace=True) self.avd_first = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and avd_first else None if self.radix >= 1: self.conv2 = SplitAttn( group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, radix=radix, norm_layer=norm_layer, drop_layer=drop_block) self.bn2 = nn.Identity() self.drop_block = nn.Identity() self.act2 = nn.Identity() else: self.conv2 = nn.Conv2d( group_width, group_width, kernel_size=3, stride=stride, padding=first_dilation, dilation=first_dilation, groups=cardinality, bias=False) self.bn2 = norm_layer(group_width) self.drop_block = drop_block() if drop_block is not None else nn.Identity() self.act2 = act_layer(inplace=True) self.avd_last = nn.AvgPool2d(3, avd_stride, padding=1) if avd_stride > 0 and not avd_first else None self.conv3 = nn.Conv2d(group_width, planes * 4, kernel_size=1, bias=False) self.bn3 = norm_layer(planes*4) self.act3 = act_layer(inplace=True) self.downsample = downsample def zero_init_last(self): if getattr(self.bn3, 'weight', None) is not None: nn.init.zeros_(self.bn3.weight) def forward(self, x): shortcut = x out = self.conv1(x) out = self.bn1(out) out = self.act1(out) if self.avd_first is not None: out = self.avd_first(out) out = self.conv2(out) out = self.bn2(out) out = self.drop_block(out) out = self.act2(out) if self.avd_last is not None: out = self.avd_last(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: shortcut = self.downsample(x) out += shortcut out = self.act3(out) return out def _create_resnest(variant, pretrained=False, **kwargs): return build_model_with_cfg( ResNet, variant, pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv1.0', 'classifier': 'fc', **kwargs } default_cfgs = generate_default_cfgs({ 
'resnest14d.gluon_in1k': _cfg(hf_hub_id='timm/'), 'resnest26d.gluon_in1k': _cfg(hf_hub_id='timm/'), 'resnest50d.in1k': _cfg(hf_hub_id='timm/'), 'resnest101e.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 256, 256), pool_size=(8, 8)), 'resnest200e.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 320, 320), pool_size=(10, 10), crop_pct=0.909, interpolation='bicubic'), 'resnest269e.in1k': _cfg( hf_hub_id='timm/', input_size=(3, 416, 416), pool_size=(13, 13), crop_pct=0.928, interpolation='bicubic'), 'resnest50d_4s2x40d.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic'), 'resnest50d_1s4x24d.in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic') }) @register_model def resnest14d(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-14d model. Weights ported from GluonCV. """ model_kwargs = dict( block=ResNestBottleneck, layers=[1, 1, 1, 1], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest14d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest26d(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-26d model. Weights ported from GluonCV. """ model_kwargs = dict( block=ResNestBottleneck, layers=[2, 2, 2, 2], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest26d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-50d model. Matches paper ResNeSt-50 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'd' for deep stem, stem_width 32, avg in downsample. """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest50d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest101e(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-101e model. Matches paper ResNeSt-101 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 23, 3], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest101e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest200e(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-200e model. Matches paper ResNeSt-200 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 24, 36, 3], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest200e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest269e(pretrained=False, **kwargs) -> ResNet: """ ResNeSt-269e model. Matches paper ResNeSt-269 model, https://arxiv.org/abs/2004.08955 Since this codebase supports all possible variations, 'e' for deep stem, stem_width 64, avg in downsample. 
""" model_kwargs = dict( block=ResNestBottleneck, layers=[3, 30, 48, 8], stem_type='deep', stem_width=64, avg_down=True, base_width=64, cardinality=1, block_args=dict(radix=2, avd=True, avd_first=False)) return _create_resnest('resnest269e', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d_4s2x40d(pretrained=False, **kwargs) -> ResNet: """ResNeSt-50 4s2x40d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=40, cardinality=2, block_args=dict(radix=4, avd=True, avd_first=True)) return _create_resnest('resnest50d_4s2x40d', pretrained=pretrained, **dict(model_kwargs, **kwargs)) @register_model def resnest50d_1s4x24d(pretrained=False, **kwargs) -> ResNet: """ResNeSt-50 1s4x24d from https://github.com/zhanghang1989/ResNeSt/blob/master/ablation.md """ model_kwargs = dict( block=ResNestBottleneck, layers=[3, 4, 6, 3], stem_type='deep', stem_width=32, avg_down=True, base_width=24, cardinality=4, block_args=dict(radix=1, avd=True, avd_first=True)) return _create_resnest('resnest50d_1s4x24d', pretrained=pretrained, **dict(model_kwargs, **kwargs))
pytorch-image-models/timm/models/resnest.py/0
{ "file_path": "pytorch-image-models/timm/models/resnest.py", "repo_id": "pytorch-image-models", "token_count": 4439 }
273
""" TResNet: High Performance GPU-Dedicated Architecture https://arxiv.org/pdf/2003.13630.pdf Original model: https://github.com/mrT23/TResNet """ from collections import OrderedDict from functools import partial from typing import List, Optional, Tuple, Union import torch import torch.nn as nn from timm.layers import SpaceToDepth, BlurPool2d, ClassifierHead, SEModule, ConvNormAct, DropPath from ._builder import build_model_with_cfg from ._features import feature_take_indices from ._manipulate import checkpoint, checkpoint_seq from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['TResNet'] # model_registry will add each entrypoint fn to this class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None, drop_path_rate=0. ): super(BasicBlock, self).__init__() self.downsample = downsample self.stride = stride act_layer = partial(nn.LeakyReLU, negative_slope=1e-3) self.conv1 = ConvNormAct(inplanes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer) self.conv2 = ConvNormAct(planes, planes, kernel_size=3, stride=1, apply_act=False) self.act = nn.ReLU(inplace=True) rd_chs = max(planes * self.expansion // 4, 64) self.se = SEModule(planes * self.expansion, rd_channels=rd_chs) if use_se else None self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, downsample=None, use_se=True, act_layer=None, aa_layer=None, drop_path_rate=0., ): super(Bottleneck, self).__init__() self.downsample = downsample self.stride = stride act_layer = act_layer or partial(nn.LeakyReLU, negative_slope=1e-3) self.conv1 = ConvNormAct( inplanes, planes, kernel_size=1, stride=1, act_layer=act_layer) self.conv2 = ConvNormAct( planes, planes, kernel_size=3, stride=stride, act_layer=act_layer, aa_layer=aa_layer) reduction_chs = max(planes * self.expansion // 8, 64) self.se = SEModule(planes, rd_channels=reduction_chs) if use_se else None self.conv3 = ConvNormAct( planes, planes * self.expansion, kernel_size=1, stride=1, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act = nn.ReLU(inplace=True) def forward(self, x): if self.downsample is not None: shortcut = self.downsample(x) else: shortcut = x out = self.conv1(x) out = self.conv2(out) if self.se is not None: out = self.se(out) out = self.conv3(out) out = self.drop_path(out) + shortcut out = self.act(out) return out class TResNet(nn.Module): def __init__( self, layers, in_chans=3, num_classes=1000, width_factor=1.0, v2=False, global_pool='fast', drop_rate=0., drop_path_rate=0., ): self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False super(TResNet, self).__init__() aa_layer = BlurPool2d act_layer = nn.LeakyReLU # TResnet stages self.inplanes = int(64 * width_factor) self.planes = int(64 * width_factor) if v2: self.inplanes = self.inplanes // 8 * 8 self.planes = self.planes // 8 * 8 dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] conv1 = ConvNormAct(in_chans * 16, self.planes, stride=1, kernel_size=3, act_layer=act_layer) layer1 = 
self._make_layer( Bottleneck if v2 else BasicBlock, self.planes, layers[0], stride=1, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[0]) layer2 = self._make_layer( Bottleneck if v2 else BasicBlock, self.planes * 2, layers[1], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[1]) layer3 = self._make_layer( Bottleneck, self.planes * 4, layers[2], stride=2, use_se=True, aa_layer=aa_layer, drop_path_rate=dpr[2]) layer4 = self._make_layer( Bottleneck, self.planes * 8, layers[3], stride=2, use_se=False, aa_layer=aa_layer, drop_path_rate=dpr[3]) # body self.body = nn.Sequential(OrderedDict([ ('s2d', SpaceToDepth()), ('conv1', conv1), ('layer1', layer1), ('layer2', layer2), ('layer3', layer3), ('layer4', layer4), ])) self.feature_info = [ dict(num_chs=self.planes, reduction=2, module=''), # Not with S2D? dict(num_chs=self.planes * (Bottleneck.expansion if v2 else 1), reduction=4, module='body.layer1'), dict(num_chs=self.planes * 2 * (Bottleneck.expansion if v2 else 1), reduction=8, module='body.layer2'), dict(num_chs=self.planes * 4 * Bottleneck.expansion, reduction=16, module='body.layer3'), dict(num_chs=self.planes * 8 * Bottleneck.expansion, reduction=32, module='body.layer4'), ] # head self.num_features = self.head_hidden_size = (self.planes * 8) * Bottleneck.expansion self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate) # model initialization for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu') if isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) # residual connections special initialization for m in self.modules(): if isinstance(m, BasicBlock): nn.init.zeros_(m.conv2.bn.weight) if isinstance(m, Bottleneck): nn.init.zeros_(m.conv3.bn.weight) def _make_layer(self, block, planes, blocks, stride=1, use_se=True, aa_layer=None, drop_path_rate=0.): downsample = None if stride != 1 or self.inplanes != planes * block.expansion: layers = [] if stride == 2: # avg pooling before 1x1 conv layers.append(nn.AvgPool2d(kernel_size=2, stride=2, ceil_mode=True, count_include_pad=False)) layers += [ConvNormAct( self.inplanes, planes * block.expansion, kernel_size=1, stride=1, apply_act=False)] downsample = nn.Sequential(*layers) layers = [] for i in range(blocks): layers.append(block( self.inplanes, planes, stride=stride if i == 0 else 1, downsample=downsample if i == 0 else None, use_se=use_se, aa_layer=aa_layer, drop_path_rate=drop_path_rate[i] if isinstance(drop_path_rate, list) else drop_path_rate, )) self.inplanes = planes * block.expansion return nn.Sequential(*layers) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict(stem=r'^body\.conv1', blocks=r'^body\.layer(\d+)' if coarse else r'^body\.layer(\d+)\.(\d+)') return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self) -> nn.Module: return self.head.fc def reset_classifier(self, num_classes: int, global_pool: Optional[str] = None): self.num_classes = num_classes self.head.reset(num_classes, pool_type=global_pool) def forward_intermediates( self, x: torch.Tensor, indices: Optional[Union[int, List[int]]] = None, norm: bool = False, stop_early: bool = False, output_fmt: str = 'NCHW', intermediates_only: bool = False, ) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]: """ Forward features that returns intermediates. 
Args: x: Input image tensor indices: Take last n blocks if int, all if None, select matching indices if sequence norm: Apply norm layer to compatible intermediates stop_early: Stop iterating over blocks when last desired intermediate hit output_fmt: Shape of intermediate feature outputs intermediates_only: Only return intermediate features Returns: """ assert output_fmt in ('NCHW',), 'Output shape must be NCHW.' intermediates = [] stage_ends = [1, 2, 3, 4, 5] take_indices, max_index = feature_take_indices(len(stage_ends), indices) take_indices = [stage_ends[i] for i in take_indices] max_index = stage_ends[max_index] # forward pass if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript stages = self.body else: stages = self.body[:max_index + 1] for feat_idx, stage in enumerate(stages): if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(stage, x) else: x = stage(x) if feat_idx in take_indices: intermediates.append(x) if intermediates_only: return intermediates return x, intermediates def prune_intermediate_layers( self, indices: Union[int, List[int]] = 1, prune_norm: bool = False, prune_head: bool = True, ): """ Prune layers not required for specified intermediates. """ stage_ends = [1, 2, 3, 4, 5] take_indices, max_index = feature_take_indices(len(stage_ends), indices) max_index = stage_ends[max_index] self.body = self.body[:max_index + 1] # truncate blocks w/ stem as idx 0 if prune_head: self.reset_classifier(0, '') return take_indices def forward_features(self, x): if self.grad_checkpointing and not torch.jit.is_scripting(): x = self.body.s2d(x) x = self.body.conv1(x) x = checkpoint_seq([ self.body.layer1, self.body.layer2, self.body.layer3, self.body.layer4], x, flatten=True) else: x = self.body(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model): if 'body.conv1.conv.weight' in state_dict: return state_dict import re state_dict = state_dict.get('model', state_dict) state_dict = state_dict.get('state_dict', state_dict) out_dict = {} for k, v in state_dict.items(): k = re.sub(r'conv(\d+)\.0.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.0.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'conv(\d+)\.0', lambda x: f'conv{int(x.group(1))}.conv', k) k = re.sub(r'conv(\d+)\.1', lambda x: f'conv{int(x.group(1))}.bn', k) k = re.sub(r'downsample\.(\d+)\.0', lambda x: f'downsample.{int(x.group(1))}.conv', k) k = re.sub(r'downsample\.(\d+)\.1', lambda x: f'downsample.{int(x.group(1))}.bn', k) if k.endswith('bn.weight'): # convert weight from inplace_abn to batchnorm v = v.abs().add(1e-5) out_dict[k] = v return out_dict def _create_tresnet(variant, pretrained=False, **kwargs): return build_model_with_cfg( TResNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(out_indices=(1, 2, 3, 4), flatten_sequential=True), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'body.conv1.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ 'tresnet_m.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), 
'tresnet_m.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_l.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_xl.miil_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_m.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_l.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_xl.miil_in1k_448': _cfg( input_size=(3, 448, 448), pool_size=(14, 14), hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k_ft_in1k': _cfg(hf_hub_id='timm/'), 'tresnet_v2_l.miil_in21k': _cfg(hf_hub_id='timm/', num_classes=11221), }) @register_model def tresnet_m(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[3, 4, 11, 3]) return _create_tresnet('tresnet_m', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_l(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[4, 5, 18, 3], width_factor=1.2) return _create_tresnet('tresnet_l', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_xl(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[4, 5, 24, 3], width_factor=1.3) return _create_tresnet('tresnet_xl', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def tresnet_v2_l(pretrained=False, **kwargs) -> TResNet: model_args = dict(layers=[3, 4, 23, 3], width_factor=1.0, v2=True) return _create_tresnet('tresnet_v2_l', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'tresnet_m_miil_in21k': 'tresnet_m.miil_in21k', 'tresnet_m_448': 'tresnet_m.miil_in1k_448', 'tresnet_l_448': 'tresnet_l.miil_in1k_448', 'tresnet_xl_448': 'tresnet_xl.miil_in1k_448', })
pytorch-image-models/timm/models/tresnet.py/0
{ "file_path": "pytorch-image-models/timm/models/tresnet.py", "repo_id": "pytorch-image-models", "token_count": 7310 }
274
import logging from itertools import islice from typing import Collection, Optional from torch import nn as nn from timm.models import group_parameters _logger = logging.getLogger(__name__) def param_groups_weight_decay( model: nn.Module, weight_decay: float = 1e-5, no_weight_decay_list: Collection[str] = (), ): no_weight_decay_list = set(no_weight_decay_list) decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue if param.ndim <= 1 or name.endswith(".bias") or name in no_weight_decay_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def _group(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def auto_group_layers(model, layers_per_group=12, num_groups=None): def _in_head(n, hp): if not hp: return True elif isinstance(hp, (tuple, list)): return any([n.startswith(hpi) for hpi in hp]) else: return n.startswith(hp) head_prefix = getattr(model, 'pretrained_cfg', {}).get('classifier', None) names_trunk = [] names_head = [] for n, _ in model.named_parameters(): names_head.append(n) if _in_head(n, head_prefix) else names_trunk.append(n) # group non-head layers num_trunk_layers = len(names_trunk) if num_groups is not None: layers_per_group = -(num_trunk_layers // -num_groups) names_trunk = list(_group(names_trunk, layers_per_group)) num_trunk_groups = len(names_trunk) layer_map = {n: i for i, l in enumerate(names_trunk) for n in l} layer_map.update({n: num_trunk_groups for n in names_head}) return layer_map _layer_map = auto_group_layers # backward compat def param_groups_layer_decay( model: nn.Module, weight_decay: float = 0.05, no_weight_decay_list: Collection[str] = (), weight_decay_exclude_1d: bool = True, layer_decay: float = .75, end_layer_decay: Optional[float] = None, min_scale: float = 0., no_opt_scale: Optional[float] = None, verbose: bool = False, ): """ Parameter groups for layer-wise lr decay & weight decay Based on BEiT: https://github.com/microsoft/unilm/blob/master/beit/optim_factory.py#L58 """ no_weight_decay_list = set(no_weight_decay_list) param_group_names = {} # NOTE for debugging param_groups = {} if hasattr(model, 'group_matcher'): # FIXME interface needs more work layer_map = group_parameters(model, model.group_matcher(coarse=False), reverse=True) else: # fallback layer_map = auto_group_layers(model) num_layers = max(layer_map.values()) + 1 layer_max = num_layers - 1 layer_scales = list(max(min_scale, layer_decay ** (layer_max - i)) for i in range(num_layers)) for name, param in model.named_parameters(): if not param.requires_grad: continue # no decay: all 1D parameters and model specific ones if (weight_decay_exclude_1d and param.ndim <= 1) or name in no_weight_decay_list: g_decay = "no_decay" this_decay = 0. 
else: g_decay = "decay" this_decay = weight_decay layer_id = layer_map.get(name, layer_max) this_scale = layer_scales[layer_id] if no_opt_scale and this_scale < no_opt_scale: # if the calculated layer scale is below this, exclude from optimization param.requires_grad = False continue group_name = "layer_%d_%s" % (layer_id, g_decay) if group_name not in param_groups: param_group_names[group_name] = { "lr_scale": this_scale, "weight_decay": this_decay, "param_names": [], } param_groups[group_name] = { "lr_scale": this_scale, "weight_decay": this_decay, "params": [], } param_group_names[group_name]["param_names"].append(name) param_groups[group_name]["params"].append(param) if verbose: import json _logger.info("parameter groups: \n%s" % json.dumps(param_group_names, indent=2)) return list(param_groups.values())
pytorch-image-models/timm/optim/_param_groups.py/0
{ "file_path": "pytorch-image-models/timm/optim/_param_groups.py", "repo_id": "pytorch-image-models", "token_count": 2036 }
275
""" PyTorch MADGRAD optimizer MADGRAD: https://arxiv.org/abs/2101.11075 Code from: https://github.com/facebookresearch/madgrad """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from typing import TYPE_CHECKING, Any, Callable, Optional import torch import torch.optim if TYPE_CHECKING: from torch.optim.optimizer import _params_t else: _params_t = Any class MADGRAD(torch.optim.Optimizer): """ MADGRAD_: A Momentumized, Adaptive, Dual Averaged Gradient Method for Stochastic Optimization. .. _MADGRAD: https://arxiv.org/abs/2101.11075 MADGRAD is a general purpose optimizer that can be used in place of SGD or Adam may converge faster and generalize better. Currently GPU-only. Typically, the same learning rate schedule that is used for SGD or Adam may be used. The overall learning rate is not comparable to either method and should be determined by a hyper-parameter sweep. MADGRAD requires less weight decay than other methods, often as little as zero. Momentum values used for SGD or Adam's beta1 should work here also. On sparse problems both weight_decay and momentum should be set to 0. Arguments: params (iterable): Iterable of parameters to optimize or dicts defining parameter groups. lr (float): Learning rate (default: 1e-2). momentum (float): Momentum value in the range [0,1) (default: 0.9). weight_decay (float): Weight decay, i.e. a L2 penalty (default: 0). eps (float): Term added to the denominator outside of the root operation to improve numerical stability. (default: 1e-6). """ def __init__( self, params: _params_t, lr: float = 1e-2, momentum: float = 0.9, weight_decay: float = 0, eps: float = 1e-6, decoupled_decay: bool = False, ): if momentum < 0 or momentum >= 1: raise ValueError(f"Momentum {momentum} must be in the range [0,1]") if lr <= 0: raise ValueError(f"Learning rate {lr} must be positive") if weight_decay < 0: raise ValueError(f"Weight decay {weight_decay} must be non-negative") if eps < 0: raise ValueError(f"Eps must be non-negative") defaults = dict( lr=lr, eps=eps, momentum=momentum, weight_decay=weight_decay, decoupled_decay=decoupled_decay, ) super().__init__(params, defaults) @property def supports_memory_efficient_fp16(self) -> bool: return False @property def supports_flat_params(self) -> bool: return True @torch.no_grad() def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]: """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: eps = group['eps'] lr = group['lr'] + eps weight_decay = group['weight_decay'] momentum = group['momentum'] ck = 1 - momentum for p in group["params"]: if p.grad is None: continue grad = p.grad if momentum != 0.0 and grad.is_sparse: raise RuntimeError("momentum != 0 is not compatible with sparse gradients") state = self.state[p] if len(state) == 0: state['step'] = 0 state['grad_sum_sq'] = torch.zeros_like(p) state['s'] = torch.zeros_like(p) if momentum != 0: state['x0'] = torch.clone(p).detach() state['step'] += 1 grad_sum_sq = state['grad_sum_sq'] s = state['s'] lamb = lr * math.sqrt(state['step']) # Apply weight decay if weight_decay != 0: if group['decoupled_decay']: p.mul_(1.0 - group['lr'] * weight_decay) else: if grad.is_sparse: raise RuntimeError("weight_decay option is not compatible with sparse gradients") grad.add_(p, alpha=weight_decay) if grad.is_sparse: grad = grad.coalesce() grad_val = grad._values() p_masked = p.sparse_mask(grad) grad_sum_sq_masked = grad_sum_sq.sparse_mask(grad) s_masked = s.sparse_mask(grad) # Compute x_0 from other known quantities rms_masked_vals = grad_sum_sq_masked._values().pow(1 / 3).add_(eps) x0_masked_vals = p_masked._values().addcdiv(s_masked._values(), rms_masked_vals, value=1) # Dense + sparse op grad_sq = grad * grad grad_sum_sq.add_(grad_sq, alpha=lamb) grad_sum_sq_masked.add_(grad_sq, alpha=lamb) rms_masked_vals = grad_sum_sq_masked._values().pow_(1 / 3).add_(eps) s.add_(grad, alpha=lamb) s_masked._values().add_(grad_val, alpha=lamb) # update masked copy of p p_kp1_masked_vals = x0_masked_vals.addcdiv(s_masked._values(), rms_masked_vals, value=-1) # Copy updated masked p to dense p using an add operation p_masked._values().add_(p_kp1_masked_vals, alpha=-1) p.add_(p_masked, alpha=-1) else: if momentum == 0: # Compute x_0 from other known quantities rms = grad_sum_sq.pow(1 / 3).add_(eps) x0 = p.addcdiv(s, rms, value=1) else: x0 = state['x0'] # Accumulate second moments grad_sum_sq.addcmul_(grad, grad, value=lamb) rms = grad_sum_sq.pow(1 / 3).add_(eps) # Update s s.add_(grad, alpha=lamb) # Step if momentum == 0: p.copy_(x0.addcdiv(s, rms, value=-1)) else: z = x0.addcdiv(s, rms, value=-1) # p is a moving average of z p.mul_(1 - ck).add_(z, alpha=ck) return loss
pytorch-image-models/timm/optim/madgrad.py/0
{ "file_path": "pytorch-image-models/timm/optim/madgrad.py", "repo_id": "pytorch-image-models", "token_count": 3562 }
276
import abc from abc import ABC from typing import Any, Dict, List, Optional import torch class Scheduler(ABC): """ Parameter Scheduler Base Class A scheduler base class that can be used to schedule any optimizer parameter groups. Unlike the builtin PyTorch schedulers, this is intended to be consistently called * At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value * At the END of each optimizer update, after incrementing the update count, to calculate next update's value The schedulers built on this should try to remain as stateless as possible (for simplicity). This family of schedulers is attempting to avoid the confusion of the meaning of 'last_epoch' and -1 values for special behaviour. All epoch and update counts must be tracked in the training code and explicitly passed in to the schedulers on the corresponding step or step_update call. Based on ideas from: * https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler * https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers """ def __init__( self, optimizer: torch.optim.Optimizer, param_group_field: str, t_in_epochs: bool = True, noise_range_t=None, noise_type='normal', noise_pct=0.67, noise_std=1.0, noise_seed=None, initialize: bool = True, ) -> None: self.optimizer = optimizer self.param_group_field = param_group_field self._initial_param_group_field = f"initial_{param_group_field}" if initialize: for i, group in enumerate(self.optimizer.param_groups): if param_group_field not in group: raise KeyError(f"{param_group_field} missing from param_groups[{i}]") group.setdefault(self._initial_param_group_field, group[param_group_field]) else: for i, group in enumerate(self.optimizer.param_groups): if self._initial_param_group_field not in group: raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]") self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups] self.metric = None # any point to having this for all? 
self.t_in_epochs = t_in_epochs self.noise_range_t = noise_range_t self.noise_pct = noise_pct self.noise_type = noise_type self.noise_std = noise_std self.noise_seed = noise_seed if noise_seed is not None else 42 self.update_groups(self.base_values) def state_dict(self) -> Dict[str, Any]: return {key: value for key, value in self.__dict__.items() if key != 'optimizer'} def load_state_dict(self, state_dict: Dict[str, Any]) -> None: self.__dict__.update(state_dict) @abc.abstractmethod def _get_lr(self, t: int) -> List[float]: pass def _get_values(self, t: int, on_epoch: bool = True) -> Optional[List[float]]: proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs) if not proceed: return None return self._get_lr(t) def step(self, epoch: int, metric: float = None) -> None: self.metric = metric values = self._get_values(epoch, on_epoch=True) if values is not None: values = self._add_noise(values, epoch) self.update_groups(values) def step_update(self, num_updates: int, metric: float = None): self.metric = metric values = self._get_values(num_updates, on_epoch=False) if values is not None: values = self._add_noise(values, num_updates) self.update_groups(values) def update_groups(self, values): if not isinstance(values, (list, tuple)): values = [values] * len(self.optimizer.param_groups) for param_group, value in zip(self.optimizer.param_groups, values): if 'lr_scale' in param_group: param_group[self.param_group_field] = value * param_group['lr_scale'] else: param_group[self.param_group_field] = value def _add_noise(self, lrs, t): if self._is_apply_noise(t): noise = self._calculate_noise(t) lrs = [v + v * noise for v in lrs] return lrs def _is_apply_noise(self, t) -> bool: """Return True if scheduler in noise range.""" apply_noise = False if self.noise_range_t is not None: if isinstance(self.noise_range_t, (list, tuple)): apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1] else: apply_noise = t >= self.noise_range_t return apply_noise def _calculate_noise(self, t) -> float: g = torch.Generator() g.manual_seed(self.noise_seed + t) if self.noise_type == 'normal': while True: # resample if noise out of percent limit, brute force but shouldn't spin much noise = torch.randn(1, generator=g).item() if abs(noise) < self.noise_pct: return noise else: noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct return noise
pytorch-image-models/timm/scheduler/scheduler.py/0
{ "file_path": "pytorch-image-models/timm/scheduler/scheduler.py", "repo_id": "pytorch-image-models", "token_count": 2368 }
277
""" Model / state_dict utils Hacked together by / Copyright 2020 Ross Wightman """ import fnmatch from copy import deepcopy import torch from torchvision.ops.misc import FrozenBatchNorm2d from timm.layers import BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d,\ freeze_batch_norm_2d, unfreeze_batch_norm_2d from .model_ema import ModelEma def unwrap_model(model): if isinstance(model, ModelEma): return unwrap_model(model.ema) else: if hasattr(model, 'module'): return unwrap_model(model.module) elif hasattr(model, '_orig_mod'): return unwrap_model(model._orig_mod) else: return model def get_state_dict(model, unwrap_fn=unwrap_model): return unwrap_fn(model).state_dict() def avg_sq_ch_mean(model, input, output): """ calculate average channel square mean of output activations """ return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item() def avg_ch_var(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() def avg_ch_var_residual(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() class ActivationStatsHook: """Iterates through each of `model`'s modules and matches modules using unix pattern matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is a match. Arguments: model (nn.Module): model from which we will extract the activation stats hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string matching with the name of model's modules. hook_fns (List[Callable]): List of hook functions to be registered at every module in `layer_names`. Inspiration from https://docs.fast.ai/callback.hook.html. Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example on how to plot Signal Propagation Plots using `ActivationStatsHook`. """ def __init__(self, model, hook_fn_locs, hook_fns): self.model = model self.hook_fn_locs = hook_fn_locs self.hook_fns = hook_fns if len(hook_fn_locs) != len(hook_fns): raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ their lengths are different.") self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): self.register_hook(hook_fn_loc, hook_fn) def _create_hook(self, hook_fn): def append_activation_stats(module, input, output): out = hook_fn(module, input, output) self.stats[hook_fn.__name__].append(out) return append_activation_stats def register_hook(self, hook_fn_loc, hook_fn): for name, module in self.model.named_modules(): if not fnmatch.fnmatch(name, hook_fn_loc): continue module.register_forward_hook(self._create_hook(hook_fn)) def extract_spp_stats( model, hook_fn_locs, hook_fns, input_shape=[8, 3, 224, 224]): """Extract average square channel mean and variance of activations during forward pass to plot Signal Propagation Plots (SPP). Paper: https://arxiv.org/abs/2101.08692 Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 """ x = torch.normal(0., 1., input_shape) hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) _ = model(x) return hook.stats def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'): """ Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced. 
submodules (list[str]): List of modules for which the parameters will be (un)frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be (un)frozen. Defaults to [] include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers. Defaults to `True`. mode (bool): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`. """ assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"' if isinstance(root_module, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): # Raise assertion here because we can't convert it in place raise AssertionError( "You have provided a batch norm layer as the `root module`. Please use " "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.") if isinstance(submodules, str): submodules = [submodules] named_modules = submodules submodules = [root_module.get_submodule(m) for m in submodules] if not len(submodules): named_modules, submodules = list(zip(*root_module.named_children())) for n, m in zip(named_modules, submodules): # (Un)freeze parameters for p in m.parameters(): p.requires_grad = False if mode == 'freeze' else True if include_bn_running_stats: # Helper to add submodule specified as a named_module def _add_submodule(module, name, submodule): split = name.rsplit('.', 1) if len(split) > 1: module.get_submodule(split[0]).add_module(split[1], submodule) else: module.add_module(name, submodule) # Freeze batch norm if mode == 'freeze': res = freeze_batch_norm_2d(m) # It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't # convert it in place, but will return the converted result. In this case `res` holds the converted # result and we may try to re-assign the named module if isinstance(m, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): _add_submodule(root_module, n, res) # Unfreeze batch norm else: res = unfreeze_batch_norm_2d(m) # Ditto. See note above in mode == 'freeze' branch if isinstance(m, (FrozenBatchNorm2d, FrozenBatchNormAct2d)): _add_submodule(root_module, n, res) def freeze(root_module, submodules=[], include_bn_running_stats=True): """ Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be frozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning, it's good practice to freeze batch norm stats. And note that these are different to the affine parameters which are just normal PyTorch parameters. Defaults to `True`. Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`. 
Examples:: >>> model = timm.create_model('resnet18') >>> # Freeze up to and including layer2 >>> submodules = [n for n, _ in model.named_children()] >>> print(submodules) ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc'] >>> freeze(model, submodules[:submodules.index('layer2') + 1]) >>> # Check for yourself that it works as expected >>> print(model.layer2[0].conv1.weight.requires_grad) False >>> print(model.layer3[0].conv1.weight.requires_grad) True >>> # Unfreeze >>> unfreeze(model) """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze") def unfreeze(root_module, submodules=[], include_bn_running_stats=True): """ Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be unfrozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers. These will be converted to `BatchNorm2d` in place. Defaults to `True`. See example in docstring for `freeze`. """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze") def reparameterize_model(model: torch.nn.Module, inplace=False) -> torch.nn.Module: if not inplace: model = deepcopy(model) def _fuse(m): for child_name, child in m.named_children(): if hasattr(child, 'fuse'): setattr(m, child_name, child.fuse()) elif hasattr(child, "reparameterize"): child.reparameterize() elif hasattr(child, "switch_to_deploy"): child.switch_to_deploy() _fuse(child) _fuse(model) return model
pytorch-image-models/timm/utils/model.py/0
{ "file_path": "pytorch-image-models/timm/utils/model.py", "repo_id": "pytorch-image-models", "token_count": 4328 }
278
# Using different models [[open-in-colab]] `smolagents` provides a flexible framework that allows you to use various language models from different providers. This guide will show you how to use different model types with your agents. ## Available model types `smolagents` supports several model types out of the box: 1. [`InferenceClientModel`]: Uses Hugging Face's Inference API to access models 2. [`TransformersModel`]: Runs models locally using the Transformers library 3. [`VLLMModel`]: Uses vLLM for fast inference with optimized serving 4. [`MLXModel`]: Optimized for Apple Silicon devices using MLX 5. [`LiteLLMModel`]: Provides access to hundreds of LLMs through LiteLLM 6. [`LiteLLMRouterModel`]: Distributes requests among multiple models 7. [`OpenAIServerModel`]: Provides access to any provider that implements an OpenAI-compatible API 8. [`AzureOpenAIServerModel`]: Uses Azure's OpenAI service 9. [`AmazonBedrockServerModel`]: Connects to AWS Bedrock's API All model classes support passing additional keyword arguments (like `temperature`, `max_tokens`, `top_p`, etc.) directly at instantiation time. These parameters are automatically forwarded to the underlying model's completion calls, allowing you to configure model behavior such as creativity, response length, and sampling strategies. ## Using Google Gemini Models As explained in the Google Gemini API documentation (https://ai.google.dev/gemini-api/docs/openai), Google provides an OpenAI-compatible API for Gemini models, allowing you to use the [`OpenAIServerModel`] with Gemini models by setting the appropriate base URL. First, install the required dependencies: ```bash pip install smolagents[openai] ``` Then, [get a Gemini API key](https://ai.google.dev/gemini-api/docs/api-key) and set it in your code: ```python GEMINI_API_KEY = <YOUR-GEMINI-API-KEY> ``` Now, you can initialize the Gemini model using the `OpenAIServerModel` class and setting the `api_base` parameter to the Gemini API base URL: ```python from smolagents import OpenAIServerModel model = OpenAIServerModel( model_id="gemini-2.0-flash", # Google Gemini OpenAI-compatible API base URL api_base="https://generativelanguage.googleapis.com/v1beta/openai/", api_key=GEMINI_API_KEY, ) ``` ## Using OpenRouter Models OpenRouter provides access to a wide variety of language models through a unified OpenAI-compatible API. You can use the [`OpenAIServerModel`] to connect to OpenRouter by setting the appropriate base URL. First, install the required dependencies: ```bash pip install smolagents[openai] ``` Then, [get an OpenRouter API key](https://openrouter.ai/keys) and set it in your code: ```python OPENROUTER_API_KEY = <YOUR-OPENROUTER-API-KEY> ``` Now, you can initialize any model available on OpenRouter using the `OpenAIServerModel` class: ```python from smolagents import OpenAIServerModel model = OpenAIServerModel( # You can use any model ID available on OpenRouter model_id="openai/gpt-4o", # OpenRouter API base URL api_base="https://openrouter.ai/api/v1", api_key=OPENROUTER_API_KEY, ) ```
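As noted above, extra generation parameters passed at instantiation are forwarded to the underlying completion calls. A minimal sketch, reusing the OpenRouter setup (the parameter values are illustrative, and which parameters are honored depends on the provider):

```python
from smolagents import OpenAIServerModel

model = OpenAIServerModel(
    model_id="openai/gpt-4o",
    api_base="https://openrouter.ai/api/v1",
    api_key=OPENROUTER_API_KEY,
    temperature=0.2,  # lower values -> more deterministic answers
    max_tokens=1024,  # cap the length of each response
    top_p=0.9,        # nucleus-sampling threshold
)
```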
smolagents/docs/source/en/examples/using_different_models.md/0
{ "file_path": "smolagents/docs/source/en/examples/using_different_models.md", "repo_id": "smolagents", "token_count": 925 }
279
# Agents का परिचय ## 🤔 Agents क्या हैं? AI का उपयोग करने वाली किसी भी कुशल प्रणाली को LLM को वास्तविक दुनिया तक किसी प्रकार की पहुंच प्रदान करने की आवश्यकता होगी: उदाहरण के लिए बाहरी जानकारी प्राप्त करने के लिए एक खोज टूल को कॉल करने की संभावना, या किसी कार्य को हल करने के लिए कुछ प्रोग्राम पर कार्य करने की। दूसरे शब्दों में, LLM में ***agency*** होनी चाहिए। एजेंटिक प्रोग्राम LLM के लिए बाहरी दुनिया का प्रवेश द्वार हैं। > [!TIP] > AI Agents वे **प्रोग्राम हैं जहां LLM आउटपुट वर्कफ़्लो को नियंत्रित करते हैं**। LLM का उपयोग करने वाली कोई भी प्रणाली LLM आउटपुट को कोड में एकीकृत करेगी। कोड वर्कफ़्लो पर LLM के इनपुट का प्रभाव सिस्टम में LLM की एजेंसी का स्तर है। ध्यान दें कि इस परिभाषा के साथ, "agent" एक अलग, 0 या 1 परिभाषा नहीं है: इसके बजाय, "agency" एक निरंतर स्पेक्ट्रम पर विकसित होती है, जैसे-जैसे आप अपने वर्कफ़्लो पर LLM को अधिक या कम शक्ति देते हैं। नीचे दी गई तालिका में देखें कि कैसे एजेंसी विभिन्न प्रणालियों में भिन्न हो सकती है: | एजेंसी स्तर | विवरण | इसे क्या कहा जाता है | उदाहरण पैटर्न | |------------|---------|-------------------|----------------| | ☆☆☆ | LLM आउटपुट का प्रोग्राम प्रवाह पर कोई प्रभाव नहीं | सरल प्रोसेसर | `process_llm_output(llm_response)` | | ★☆☆ | LLM आउटपुट if/else स्विच निर्धारित करता है | राउटर | `if llm_decision(): path_a() else: path_b()` | | ★★☆ | LLM आउटपुट फंक्शन एक्जीक्यूशन निर्धारित करता है | टूल कॉलर | `run_function(llm_chosen_tool, llm_chosen_args)` | | ★★★ | LLM आउटपुट पुनरावृत्ति और प्रोग्राम की निरंतरता को नियंत्रित करता है | मल्टी-स्टेप एजेंट | `while llm_should_continue(): execute_next_step()` | | ★★★ | एक एजेंटिक वर्कफ़्लो दूसरे एजेंटिक वर्कफ़्लो को शुरू कर सकता है | मल्टी-एजेंट | `if llm_trigger(): execute_agent()` | मल्टी-स्टेप agent की यह कोड संरचना है: ```python memory = [user_defined_task] while llm_should_continue(memory): # यह लूप मल्टी-स्टेप भाग है action = llm_get_next_action(memory) # यह टूल-कॉलिंग भाग है observations = execute_action(action) memory += [action, observations] ``` यह एजेंटिक सिस्टम एक लूप में चलता है, प्रत्येक चरण में एक नई क्रिया को शुरू करता है (क्रिया में कुछ पूर्व-निर्धारित *tools* को कॉल करना शामिल हो सकता है जो केवल फंक्शंस हैं), जब तक कि उसके अवलोकन से यह स्पष्ट न हो जाए कि दिए गए कार्य को हल करने के लिए एक संतोषजनक स्थिति प्राप्त कर ली गई है। ## ✅ Agents का उपयोग कब करें / ⛔ कब उनसे बचें Agents तब उपयोगी होते हैं जब आपको किसी ऐप के वर्कफ़्लो को निर्धारित करने के लिए LLM की आवश्यकता होती है। लेकिन वे अक्सर जरूरत से ज्यादा होते हैं। सवाल यह है कि, क्या मुझे वास्तव में दिए गए कार्य को कुशलतापूर्वक हल करने के लिए वर्कफ़्लो में लचीलेपन की आवश्यकता है? यदि पूर्व-निर्धारित वर्कफ़्लो बहुत बार विफल होता है, तो इसका मतलब है कि आपको अधिक लचीलेपन की आवश्यकता है। आइए एक उदाहरण लेते हैं: मान लीजिए आप एक ऐप बना रहे हैं जो एक सर्फिंग ट्रिप वेबसाइट पर ग्राहक अनुरोधों को संभालता है। आप पहले से जान सकते हैं कि अनुरोध 2 में से किसी एक श्रेणी में आएंगे (उपयोगकर्ता की पसंद के आधार पर), और आपके पास इन 2 मामलों में से प्रत्येक के लिए एक पूर्व-निर्धारित वर्कफ़्लो है। 1. ट्रिप के बारे में कुछ जानकारी चाहिए? ⇒ उन्हें अपने नॉलेज बेस में खोज करने के लिए एक सर्च बार तक पहुंच दें 2. सेल्स टीम से बात करना चाहते हैं? ⇒ उन्हें एक संपर्क फॉर्म में टाइप करने दें। यदि वह निर्धारणात्मक वर्कफ़्लो सभी प्रश्नों के लिए फिट बैठता है, तो बेशक बस सब कुछ कोड करें! 
यह आपको एक 100% विश्वसनीय सिस्टम देगा और एलएलएम द्वारा अनपेक्षित कार्यप्रवाह में हस्तक्षेप करने से त्रुटियों का कोई जोखिम नहीं होगा। साधारणता और मजबूती के लिए, सलाह दी जाती है कि एजेंटिक व्यवहार का उपयोग न किया जाए। लेकिन क्या होगा अगर वर्कफ़्लो को पहले से इतनी अच्छी तरह से निर्धारित नहीं किया जा सकता? उदाहरण के लिए, एक उपयोगकर्ता पूछना चाहता है: `"मैं सोमवार को आ सकता हूं, लेकिन मैं अपना पासपोर्ट भूल गया जिससे मुझे बुधवार तक देर हो सकती है, क्या आप मुझे और मेरी चीजों को मंगलवार सुबह सर्फ करने ले जा सकते हैं, क्या मुझे कैंसलेशन इंश्योरेंस मिल सकता है?"` यह प्रश्न कई कारकों पर निर्भर करता है, और शायद ऊपर दिए गए पूर्व-निर्धारित मानदंडों में से कोई भी इस अनुरोध के लिए पर्याप्त नहीं होगा। यदि पूर्व-निर्धारित वर्कफ़्लो बहुत बार विफल होता है, तो इसका मतलब है कि आपको अधिक लचीलेपन की आवश्यकता है। यहीं पर एक एजेंटिक सेटअप मदद करता है। ऊपर दिए गए उदाहरण में, आप बस एक मल्टी-स्टेप agent बना सकते हैं जिसके पास मौसम पूर्वानुमान के लिए एक मौसम API, यात्रा की दूरी जानने के लिए के लिए Google Maps API, एक कर्मचारी उपलब्धता डैशबोर्ड और आपके नॉलेज बेस पर एक RAG सिस्टम तक पहुंच है। हाल ही तक, कंप्यूटर प्रोग्राम पूर्व-निर्धारित वर्कफ़्लो तक सीमित थे, if/else स्विच का ढेर लगाकार जटिलता को संभालने का प्रयास कर रहे थे। वे बेहद संकीर्ण कार्यों पर केंद्रित थे, जैसे "इन संख्याओं का योग निकालें" या "इस ग्राफ़ में सबसे छोटा रास्ता खोजें"। लेकिन वास्तव में, अधिकांश वास्तविक जीवन के कार्य, जैसे ऊपर दिया गया हमारा यात्रा उदाहरण, पूर्व-निर्धारित वर्कफ़्लो में फिट नहीं होते हैं। एजेंटिक सिस्टम प्रोग्राम के लिए वास्तविक दुनिया के कार्यों की विशाल दुनिया खोलते हैं! ## क्यों `smolagents`? कुछ लो-लेवल एजेंटिक उपयोग के मामलों के लिए, जैसे चेन या राउटर, आप सभी कोड खुद लिख सकते हैं। आप इस तरह से बहुत बेहतर होंगे, क्योंकि यह आपको अपने सिस्टम को बेहतर ढंग से नियंत्रित और समझने की अनुमति देगा। लेकिन जैसे ही आप अधिक जटिल व्यवहारों की ओर बढ़ते हैं जैसे कि LLM को एक फ़ंक्शन कॉल करने देना (यह "tool calling" है) या LLM को एक while लूप चलाने देना ("multi-step agent"), कुछ एब्सट्रैक्शन्स की आवश्यकता होती है: - टूल कॉलिंग के लिए, आपको एजेंट के आउटपुट को पार्स करने की आवश्यकता होती है, इसलिए इस आउटपुट को एक पूर्व-निर्धारित प्रारूप की आवश्यकता होती है जैसे "विचार: मुझे 'get_weather' टूल कॉल करना चाहिए। क्रिया: get_weather(Paris)।", जिसे आप एक पूर्व-निर्धारित फ़ंक्शन के साथ पार्स करते हैं, और LLM को दिए गए सिस्टम प्रॉम्प्ट को इस प्रारूप के बारे में सूचित करना चाहिए। - एक मल्टी-स्टेप एजेंट के लिए जहां LLM आउटपुट लूप को निर्धारित करता है, आपको पिछले लूप इटरेशन में क्या हुआ इसके आधार पर LLM को एक अलग प्रॉम्प्ट देने की आवश्यकता होती है: इसलिए आपको किसी प्रकार की मेमोरी की आवश्यकता होती है। इन दो उदाहरणों के साथ, हमने पहले ही कुछ चीजों की आवश्यकता का पता लगा लिया: - बेशक, एक LLM जो सिस्टम को पावर देने वाले इंजन के रूप में कार्य करता है - एजेंट द्वारा एक्सेस किए जा सकने वाले टूल्स की एक सूची - एक पार्सर जो LLM आउटपुट से टूल कॉल को निकालता है - एक सिस्टम प्रोम्प्ट जो पार्सर के साथ सिंक्रनाइज़ होता है - एक मेमोरी लेकिन रुकिए, चूंकि हम निर्णयों में LLM को जगह देते हैं, निश्चित रूप से वे गलतियां करेंगे: इसलिए हमें एरर लॉगिंग और पुनः प्रयास तंत्र की आवश्यकता है। ये सभी तत्व एक अच्छे कामकाजी सिस्टम बनाने के लिए एक-दूसरे से घनिष्ठ रूप से जुड़े हुए हैं। यही कारण है कि हमने तय किया कि इन सभी चीजों को एक साथ काम करने के लिए बुनियादी निर्माण ब्लॉक्स की आवश्यकता है। ## कोड Agents एक मल्टी-स्टेप एजेंट में, प्रत्येक चरण पर, LLM बाहरी टूल्स को कुछ कॉल के रूप में एक क्रिया लिख सकता है। इन क्रियाओं को लिखने के लिए एक सामान्य स्वरूप (Anthropic, OpenAI और कई अन्य द्वारा उपयोग किया जाता है) आमतौर पर "टूल्स के नाम और उपयोग करने के लिए तर्कों के JSON के रूप में क्रियाएं 
With these two examples, we have already identified the need for a few things:
- of course, an LLM that acts as the engine powering the system
- a list of tools the agent can access
- a parser that extracts tool calls from the LLM output
- a system prompt synced with the parser
- a memory

But wait, since we give room to LLMs in decisions, they will surely make mistakes: so we need error logging and retry mechanisms.

All these elements are tightly coupled with each other to build a well-functioning system. That is why we decided we needed basic building blocks to make all this stuff work together.

## Code agents

In a multi-step agent, at each step, the LLM can write an action, in the form of some calls to external tools. A common format for writing these actions (used by Anthropic, OpenAI, and many others) is generally different shades of "writing actions as a JSON of tool names and arguments to use, which you then parse to know which tool to execute and with which arguments".

[Multiple](https://huggingface.co/papers/2402.01030) [research](https://huggingface.co/papers/2411.01747) [papers](https://huggingface.co/papers/2401.00812) have shown that having the tool-calling LLM write its actions in code is much better.

The reason is simply that *we crafted our code languages specifically to be the best possible way to express actions performed by a computer*. If JSON snippets were a better expression, JSON would be the top programming language and programming would be hell.

The figure below, taken from [Executable Code Actions Elicit Better LLM Agents](https://huggingface.co/papers/2402.01030), illustrates some advantages of writing actions in code:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/code_vs_json_actions.png">

Writing actions in code rather than JSON-like snippets gives better:

- **Composability:** could you nest JSON actions within each other, or define a set of JSON actions to re-use later, the same way you can just define a Python function? (see the sketch below)
- **Object management:** how do you store the output of an action like `generate_image` in JSON?
- **Generality:** code is built to simply express anything you can have a computer do.
- **Representation in LLM training data:** plenty of quality code actions are already included in LLMs' training data, which means they are already trained for this!
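To make the composability and object-management points concrete, here is a sketch with two made-up, stubbed tools; in a code action, outputs flow between tools as plain Python objects:

```python
# Both tools are hypothetical stubs, used only for illustration.
def generate_image(prompt: str) -> str:
    return f"<image for '{prompt}'>"

def describe_image(image: str) -> str:
    return f"a description of {image}"

# A single code action can loop, nest calls, and keep intermediate objects in
# variables, all of which is awkward to express as one JSON tool call.
images = [generate_image(p) for p in ["a surfer", "a leopard on a bridge"]]
captions = [describe_image(image) for image in images]
print(captions)
```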
smolagents/docs/source/hi/conceptual_guides/intro_agents.md/0
{ "file_path": "smolagents/docs/source/hi/conceptual_guides/intro_agents.md", "repo_id": "smolagents", "token_count": 11425 }
280
# Text-to-SQL[[text-to-sql]]

[[open-in-colab]]

In this tutorial, we'll implement an agent that works with SQL using `smolagents`.

> Let's start with one important question: why not keep it simple and use a standard text-to-SQL pipeline?

A standard text-to-SQL pipeline is often brittle: the generated query can be wrong, and worse, it can be wrong without raising an error, silently returning incorrect or useless results.

👉 An agent system, on the other hand, can critically inspect the outputs and decide for itself whether the query needs to be fixed, which greatly improves performance.

Let's build this agent! 💪

Run the command below to install the required dependencies:

```bash
!pip install smolagents python-dotenv sqlalchemy --upgrade -q
```

To call the inference provider, you need a valid token set in the `HF_TOKEN` environment variable. We'll load it with python-dotenv.

```py
from dotenv import load_dotenv

load_dotenv()
```

Next, let's set up the SQL environment:

```py
from sqlalchemy import (
    create_engine,
    MetaData,
    Table,
    Column,
    String,
    Integer,
    Float,
    insert,
    inspect,
    text,
)

engine = create_engine("sqlite:///:memory:")
metadata_obj = MetaData()


def insert_rows_into_table(rows, table, engine=engine):
    for row in rows:
        stmt = insert(table).values(**row)
        with engine.begin() as connection:
            connection.execute(stmt)


table_name = "receipts"
receipts = Table(
    table_name,
    metadata_obj,
    Column("receipt_id", Integer, primary_key=True),
    Column("customer_name", String(16), primary_key=True),
    Column("price", Float),
    Column("tip", Float),
)
metadata_obj.create_all(engine)

rows = [
    {"receipt_id": 1, "customer_name": "Alan Payne", "price": 12.06, "tip": 1.20},
    {"receipt_id": 2, "customer_name": "Alex Mason", "price": 23.86, "tip": 0.24},
    {"receipt_id": 3, "customer_name": "Woodrow Wilson", "price": 53.43, "tip": 5.43},
    {"receipt_id": 4, "customer_name": "Margaret James", "price": 21.11, "tip": 1.00},
]
insert_rows_into_table(rows, receipts)
```

### Build our agent[[build-our-agent]]

Now let's make our SQL table queryable through a tool.

The tool's description attribute gets embedded in the LLM's prompt by the agent system: it tells the LLM how it can use the tool. This is exactly where we want to describe our SQL table.

```py
inspector = inspect(engine)
columns_info = [(col["name"], col["type"]) for col in inspector.get_columns("receipts")]

table_description = "Columns:\n" + "\n".join([f"  - {name}: {col_type}" for name, col_type in columns_info])
print(table_description)
```

```text
Columns:
  - receipt_id: INTEGER
  - customer_name: VARCHAR(16)
  - price: FLOAT
  - tip: FLOAT
```

Now let's build our own tool. It needs the following (for more detail, see the [tool documentation](../tutorials/tools)):
- A docstring with an `Args:` section listing the arguments
- Type hints on both inputs and output

```py
from smolagents import tool


@tool
def sql_engine(query: str) -> str:
    """
    Allows you to perform SQL queries on the table. Returns a string representation of the result.
    The table is named 'receipts'. Its description is as follows:
        Columns:
        - receipt_id: INTEGER
        - customer_name: VARCHAR(16)
        - price: FLOAT
        - tip: FLOAT

    Args:
        query: The query to perform. This should be correct SQL.
    """
    output = ""
    with engine.connect() as con:
        rows = con.execute(text(query))
        for row in rows:
            output += "\n" + str(row)
    return output
```

Now let's create an agent that leverages this tool. We use `CodeAgent`, smolagents' main agent class: an agent that writes actions in code and can iterate on previous outputs according to the ReAct framework.

The model is the LLM that powers the agent system. `InferenceClientModel` lets you call LLMs via Hugging Face's Inference API, either serverless or through a dedicated endpoint; you could also use any other proprietary API.

```py
from smolagents import CodeAgent, InferenceClientModel

agent = CodeAgent(
    tools=[sql_engine],
    model=InferenceClientModel(model_id="meta-llama/Llama-3.1-8B-Instruct"),
)
agent.run("Can you give me the name of the client who got the most expensive receipt?")
```

### Level up: table joins[[level-2-table-joins]]

Now let's tackle a harder challenge: we want our agent to handle joins across multiple tables. To that end, let's create a second table recording the name of the waiter for each `receipt_id`:

```py
table_name = "waiters"
waiters = Table(
    table_name,
    metadata_obj,
    Column("receipt_id", Integer, primary_key=True),
    Column("waiter_name", String(16), primary_key=True),
)
metadata_obj.create_all(engine)

rows = [
    {"receipt_id": 1, "waiter_name": "Corey Johnson"},
    {"receipt_id": 2, "waiter_name": "Michael Watts"},
    {"receipt_id": 3, "waiter_name": "Michael Watts"},
    {"receipt_id": 4, "waiter_name": "Margaret James"},
]
insert_rows_into_table(rows, waiters)
```

Since the tables have changed, we update the description of `sql_engine` so the LLM can properly leverage the table information.

```py
updated_description = """Allows you to perform SQL queries on the table. Beware that this tool's output is a string representation of the execution output.
It can use the following tables:"""

inspector = inspect(engine)
for table in ["receipts", "waiters"]:
    columns_info = [(col["name"], col["type"]) for col in inspector.get_columns(table)]

    table_description = f"Table '{table}':\n"

    table_description += "Columns:\n" + "\n".join([f"  - {name}: {col_type}" for name, col_type in columns_info])
    updated_description += "\n\n" + table_description

print(updated_description)
```

Since this request is a bit harder than the previous one, we'll switch the LLM engine to the more powerful [Qwen/Qwen2.5-Coder-32B-Instruct](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) model!

```py
sql_engine.description = updated_description

agent = CodeAgent(
    tools=[sql_engine],
    model=InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct"),
)

agent.run("Which waiter got more total money from tips?")
```
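Before trusting the agent's answer, you can sanity-check it by calling the tool directly with the kind of join query the agent should converge on (the exact SQL the agent writes may differ):

```py
print(sql_engine(
    "SELECT waiters.waiter_name, SUM(receipts.tip) AS total_tips "
    "FROM receipts "
    "JOIN waiters ON receipts.receipt_id = waiters.receipt_id "
    "GROUP BY waiters.waiter_name "
    "ORDER BY total_tips DESC"
))
# Michael Watts should come first, with about 5.67 in total tips.
```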
It works right away! Surprisingly simple to set up, wasn't it?

That's it for this example! We've covered these concepts:
- Building new tools
- Updating a tool's description
- Improving an agent's reasoning by switching to a stronger LLM

✅ Now go build the text-to-SQL system you've always dreamt of! ✨
smolagents/docs/source/ko/examples/text_to_sql.md/0
{ "file_path": "smolagents/docs/source/ko/examples/text_to_sql.md", "repo_id": "smolagents", "token_count": 4532 }
281
# Inspecting runs with OpenTelemetry

[[open-in-colab]]

> [!TIP]
> If you are new to building agents, first read the [intro to agents](../conceptual_guides/intro_agents) and the [guided tour of smolagents](../guided_tour).

## Why log your agent runs?

Debugging agent runs is challenging.

Validating that a run went properly is hard, since agent workflows are [unpredictable by design](../conceptual_guides/intro_agents) (if they were predictable, you would just use traditional code).

Inspecting a run is hard as well: multi-step agents tend to quickly flood the console with logs, and most errors are just "LLM dumb" kinds of mistakes that the LLM usually self-corrects in later steps by writing better code or tool calls.

So using instrumentation to record agent runs in production is essential for later inspection and analysis!

We have adopted the [OpenTelemetry](https://opentelemetry.io/) standard for instrumenting agent runs.

This means you only need to add a small amount of instrumentation code, then run your agents as usual, and everything gets logged to your monitoring platform. Below are examples of doing this with different OpenTelemetry backends.

Here is how it looks on the monitoring platform:

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.gif"/>
</div>

## Setting up telemetry with Arize AI Phoenix

First install the required packages. Here we install [Phoenix by Arize AI](https://github.com/Arize-ai/phoenix) as the log collection and inspection solution, but you could use any other OpenTelemetry-compatible platform for the collection and inspection part.

```shell
pip install 'smolagents[telemetry]'
```

Then run the log collector in the background:

```shell
python -m phoenix.server.main serve
```

Finally, set up `SmolagentsInstrumentor` to trace your agent activity and send the traces to Phoenix's default endpoint:

```python
from phoenix.otel import register
from openinference.instrumentation.smolagents import SmolagentsInstrumentor

register()
SmolagentsInstrumentor().instrument()
```

With that in place, you can run your agents as usual!

```py
from smolagents import (
    CodeAgent,
    ToolCallingAgent,
    WebSearchTool,
    VisitWebpageTool,
    InferenceClientModel,
)

model = InferenceClientModel()

search_agent = ToolCallingAgent(
    tools=[WebSearchTool(), VisitWebpageTool()],
    model=model,
    name="search_agent",
    description="This is an agent that can do web search.",
)

manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[search_agent],
)
manager_agent.run(
    "If the US keeps its 2024 growth rate, how many years will it take for the GDP to double?"
)
```

Voilà! You can then navigate to `http://0.0.0.0:6006/projects/` to inspect your run:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/inspect_run_phoenix.png">

As shown, the CodeAgent called its managed ToolCallingAgent (note: a managed agent could also be another CodeAgent) to run a web search for the US 2024 growth rate. Once the managed agent returned its report, the manager agent used it to compute the economy doubling time! Pretty smart, right?
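Phoenix is only one possible backend. The same instrumentor can export to any OTLP-compatible platform through the vanilla OpenTelemetry SDK. A minimal sketch, assuming the `opentelemetry-exporter-otlp` package is installed and with a placeholder endpoint for your own collector:

```python
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from openinference.instrumentation.smolagents import SmolagentsInstrumentor

provider = TracerProvider()
provider.add_span_processor(
    BatchSpanProcessor(OTLPSpanExporter(endpoint="http://localhost:4318/v1/traces"))
)
SmolagentsInstrumentor().instrument(tracer_provider=provider)
```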
## Setting up telemetry with 🪢 Langfuse

This section shows how to monitor and debug your Hugging Face **smolagents** with **Langfuse** using the `SmolagentsInstrumentor`.

> **What is Langfuse?** [Langfuse](https://langfuse.com) is an open-source platform for LLM engineering. It provides tracing and monitoring for AI agents, helping developers debug, analyze, and optimize their products. Langfuse integrates with various tools and frameworks via native integrations, OpenTelemetry, and SDKs.

### Step 1: Install dependencies

```python
%pip install langfuse 'smolagents[telemetry]' openinference-instrumentation-smolagents
```

### Step 2: Set up environment variables

Set your Langfuse API keys and configure the OpenTelemetry endpoint to send traces to Langfuse. Get your API keys by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-hosting Langfuse](https://langfuse.com/self-hosting).

Also add your [Hugging Face token](https://huggingface.co/settings/tokens) (`HF_TOKEN`) as an environment variable:

```python
import os

# Get keys for your project from the project settings page: https://cloud.langfuse.com
os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..."
os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..."
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"  # 🇪🇺 EU region
# os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com"  # 🇺🇸 US region

# your Hugging Face token
os.environ["HF_TOKEN"] = "hf_..."
```

```python
from langfuse import get_client

langfuse = get_client()

# Verify connection
if langfuse.auth_check():
    print("Langfuse client is authenticated and ready!")
else:
    print("Authentication failed. Please check your credentials and host.")
```

### Step 3: Initialize the `SmolagentsInstrumentor`

Initialize the `SmolagentsInstrumentor` before your application code runs.

```python
from openinference.instrumentation.smolagents import SmolagentsInstrumentor

SmolagentsInstrumentor().instrument()
```

### Step 4: Run your smolagent

```python
from smolagents import (
    CodeAgent,
    ToolCallingAgent,
    WebSearchTool,
    VisitWebpageTool,
    InferenceClientModel,
)

model = InferenceClientModel(
    model_id="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
)

search_agent = ToolCallingAgent(
    tools=[WebSearchTool(), VisitWebpageTool()],
    model=model,
    name="search_agent",
    description="This is an agent that can do web search.",
)

manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[search_agent],
)
manager_agent.run(
    "How can Langfuse be used to monitor and improve the reasoning and decision-making of smolagents when they execute multi-step tasks, like dynamically adjusting a recipe based on user feedback or available ingredients?"
)
```

### Step 5: View traces in Langfuse

After running the agent, you can view the traces generated by your smolagents application in [Langfuse](https://cloud.langfuse.com). You should see detailed steps of the LLM interactions, which can help you debug and optimize your AI agent.

![smolagents example trace](https://langfuse.com/images/cookbook/integration-smolagents/smolagent_example_trace.png)

_[Public example trace in Langfuse](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/ce5160f9bfd5a6cd63b07d2bfcec6f54?timestamp=2025-02-11T09%3A25%3A45.163Z&display=details)_
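Whichever backend you choose, you can also group several runs under one parent span using the plain OpenTelemetry API, which makes related runs easier to find in the trace UI. A minimal sketch, where the span and attribute names are arbitrary examples:

```python
from opentelemetry import trace

tracer = trace.get_tracer("smolagents-demo")

with tracer.start_as_current_span("recipe-session") as span:
    span.set_attribute("session.id", "demo-123")  # arbitrary example attribute
    manager_agent.run("Suggest a substitution for eggs in the recipe above.")
```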
smolagents/docs/source/zh/tutorials/inspect_runs.md/0
{ "file_path": "smolagents/docs/source/zh/tutorials/inspect_runs.md", "repo_id": "smolagents", "token_count": 3222 }
282
from smolagents import CodeAgent, InferenceClientModel, WebSearchTool model = InferenceClientModel() # Docker executor example with CodeAgent(tools=[WebSearchTool()], model=model, executor_type="docker") as agent: output = agent.run("How many seconds would it take for a leopard at full speed to run through Pont des Arts?") print("Docker executor result:", output) # E2B executor example with CodeAgent(tools=[WebSearchTool()], model=model, executor_type="e2b") as agent: output = agent.run("How many seconds would it take for a leopard at full speed to run through Pont des Arts?") print("E2B executor result:", output) # WebAssembly executor example with CodeAgent(tools=[], model=model, executor_type="wasm") as agent: output = agent.run("Calculate the square root of 125.") print("Wasm executor result:", output) # TODO: Support tools # with CodeAgent(tools=[VisitWebpageTool()], model=model, executor_type="wasm") as agent: # output = agent.run("What is the content of the Wikipedia page at https://en.wikipedia.org/wiki/Intelligent_agent?")
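For comparison, a sketch of the default local executor; this assumes the LLM-written code is trusted enough to run in-process, which is exactly what the sandboxed executors above avoid:

```python
# Default local executor: no sandbox, code runs in the current process.
agent = CodeAgent(tools=[], model=model)
print("Local executor result:", agent.run("Calculate the square root of 125."))
```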
smolagents/examples/sandboxed_execution.py/0
{ "file_path": "smolagents/examples/sandboxed_execution.py", "repo_id": "smolagents", "token_count": 318 }
283
#!/usr/bin/env python # coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import warnings from types import TracebackType from typing import TYPE_CHECKING, Any from smolagents.tools import Tool __all__ = ["MCPClient"] if TYPE_CHECKING: from mcpadapt.core import StdioServerParameters class MCPClient: """Manages the connection to an MCP server and make its tools available to SmolAgents. Note: tools can only be accessed after the connection has been started with the `connect()` method, done during the init. If you don't use the context manager we strongly encourage to use "try ... finally" to ensure the connection is cleaned up. Args: server_parameters (StdioServerParameters | dict[str, Any] | list[StdioServerParameters | dict[str, Any]]): Configuration parameters to connect to the MCP server. Can be a list if you want to connect multiple MCPs at once. - An instance of `mcp.StdioServerParameters` for connecting a Stdio MCP server via standard input/output using a subprocess. - A `dict` with at least: - "url": URL of the server. - "transport": Transport protocol to use, one of: - "streamable-http": Streamable HTTP transport (default). - "sse": Legacy HTTP+SSE transport (deprecated). adapter_kwargs (dict[str, Any], optional): Additional keyword arguments to be passed directly to `MCPAdapt`. structured_output (bool, optional, defaults to False): Whether to enable structured output features for MCP tools. If True, enables: - Support for outputSchema in MCP tools - Structured content handling (structuredContent from MCP responses) - JSON parsing fallback for structured data If False, uses the original simple text-only behavior for backwards compatibility. Example: ```python # fully managed context manager + stdio with MCPClient(...) as tools: # tools are now available # context manager + Streamable HTTP transport: with MCPClient({"url": "http://localhost:8000/mcp", "transport": "streamable-http"}) as tools: # tools are now available # Enable structured output for advanced MCP tools: with MCPClient(server_parameters, structured_output=True) as tools: # tools with structured output support are now available # manually manage the connection via the mcp_client object: try: mcp_client = MCPClient(...) tools = mcp_client.get_tools() # use your tools here. finally: mcp_client.disconnect() ``` """ def __init__( self, server_parameters: "StdioServerParameters" | dict[str, Any] | list["StdioServerParameters" | dict[str, Any]], adapter_kwargs: dict[str, Any] | None = None, structured_output: bool | None = None, ): # Handle future warning for structured_output default value change if structured_output is None: warnings.warn( "Parameter 'structured_output' was not specified. " "Currently it defaults to False, but in version 1.25, the default will change to True. " "To suppress this warning, explicitly set structured_output=True (new behavior) or structured_output=False (legacy behavior). 
" "See documentation at https://huggingface.co/docs/smolagents/tutorials/tools#structured-output-and-output-schema-support for more details.", FutureWarning, stacklevel=2, ) structured_output = False try: from mcpadapt.core import MCPAdapt from mcpadapt.smolagents_adapter import SmolAgentsAdapter except ModuleNotFoundError: raise ModuleNotFoundError("Please install 'mcp' extra to use MCPClient: `pip install 'smolagents[mcp]'`") if isinstance(server_parameters, dict): transport = server_parameters.get("transport") if transport is None: transport = "streamable-http" server_parameters["transport"] = transport if transport not in {"sse", "streamable-http"}: raise ValueError( f"Unsupported transport: {transport}. Supported transports are 'streamable-http' and 'sse'." ) adapter_kwargs = adapter_kwargs or {} self._adapter = MCPAdapt( server_parameters, SmolAgentsAdapter(structured_output=structured_output), **adapter_kwargs ) self._tools: list[Tool] | None = None self.connect() def connect(self): """Connect to the MCP server and initialize the tools.""" self._tools: list[Tool] = self._adapter.__enter__() def disconnect( self, exc_type: type[BaseException] | None = None, exc_value: BaseException | None = None, exc_traceback: TracebackType | None = None, ): """Disconnect from the MCP server""" self._adapter.__exit__(exc_type, exc_value, exc_traceback) def get_tools(self) -> list[Tool]: """The SmolAgents tools available from the MCP server. Note: for now, this always returns the tools available at the creation of the session, but it will in a future release return also new tools available from the MCP server if any at call time. Raises: ValueError: If the MCP server tools is None (usually assuming the server is not started). Returns: list[Tool]: The SmolAgents tools available from the MCP server. """ if self._tools is None: raise ValueError( "Couldn't retrieve tools from MCP server, run `mcp_client.connect()` first before accessing `tools`" ) return self._tools def __enter__(self) -> list[Tool]: """Connect to the MCP server and return the tools directly. Note that because of the `.connect` in the init, the mcp_client is already connected at this point. """ return self._tools def __exit__( self, exc_type: type[BaseException] | None, exc_value: BaseException | None, exc_traceback: TracebackType | None, ): """Disconnect from the MCP server.""" self.disconnect(exc_type, exc_value, exc_traceback)
smolagents/src/smolagents/mcp_client.py/0
{ "file_path": "smolagents/src/smolagents/mcp_client.py", "repo_id": "smolagents", "token_count": 2709 }
284
import pytest from smolagents.tools import Tool, tool @pytest.fixture def test_tool(): class TestTool(Tool): name = "test_tool" description = "A test tool" inputs = {"input": {"type": "string", "description": "Input value"}} output_type = "string" def forward(self, input): if input == "error": raise ValueError("Tool execution error") return f"Processed: {input}" return TestTool() @pytest.fixture def no_input_tool(): class NoInputTool(Tool): name = "no_input_tool" description = "Tool with no inputs" inputs = {} output_type = "string" def forward(self): return "test" return NoInputTool() @pytest.fixture def single_input_tool(): class SingleInputTool(Tool): name = "single_input_tool" description = "Tool with one input" inputs = {"text": {"type": "string", "description": "Input text"}} output_type = "string" def forward(self, text): return "test" return SingleInputTool() @pytest.fixture def multi_input_tool(): class MultiInputTool(Tool): name = "multi_input_tool" description = "Tool with multiple inputs" inputs = { "text": {"type": "string", "description": "Text input"}, "count": {"type": "integer", "description": "Number count"}, } output_type = "object" def forward(self, text, count): return "test" return MultiInputTool() @pytest.fixture def multiline_description_tool(): class MultilineDescriptionTool(Tool): name = "multiline_description_tool" description = "This is a tool with\nmultiple lines\nin the description" inputs = {"input": {"type": "string", "description": "Some input"}} output_type = "string" def forward(self, input): return "test" return MultilineDescriptionTool() @pytest.fixture def example_tool(): @tool def valid_tool_function(input: str) -> str: """A valid tool function. Args: input (str): Input string. """ return input.upper() return valid_tool_function @pytest.fixture def boolean_default_tool_class(): class BooleanDefaultTool(Tool): name = "boolean_default_tool" description = "A tool with a boolean default parameter" inputs = { "text": {"type": "string", "description": "Input text"}, "flag": {"type": "boolean", "description": "Boolean flag with default value", "nullable": True}, } output_type = "string" def forward(self, text: str, flag: bool = False) -> str: return f"Text: {text}, Flag: {flag}" return BooleanDefaultTool() @pytest.fixture def boolean_default_tool_function(): @tool def boolean_default_tool(text: str, flag: bool = False) -> str: """ A tool with a boolean default parameter. Args: text: Input text flag: Boolean flag with default value """ return f"Text: {text}, Flag: {flag}" return boolean_default_tool @pytest.fixture def optional_input_tool_class(): class OptionalInputTool(Tool): name = "optional_input_tool" description = "A tool with an optional input parameter" inputs = { "required_text": {"type": "string", "description": "Required input text"}, "optional_text": {"type": "string", "description": "Optional input text", "nullable": True}, } output_type = "string" def forward(self, required_text: str, optional_text: str | None = None) -> str: if optional_text: return f"{required_text} + {optional_text}" return required_text return OptionalInputTool() @pytest.fixture def optional_input_tool_function(): @tool def optional_input_tool(required_text: str, optional_text: str | None = None) -> str: """ A tool with an optional input parameter. Args: required_text: Required input text optional_text: Optional input text """ if optional_text: return f"{required_text} + {optional_text}" return required_text return optional_input_tool
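As a rough illustration (an assumption, not a test from the repo), a test consuming these fixtures could look like the sketch below; it assumes the fixtures are exposed to the test session (e.g., via a conftest plugin) and relies on smolagents `Tool` instances being callable, so that invoking one runs its `forward()`:

```python
import pytest


def test_tool_processes_input(test_tool):
    assert test_tool(input="hello") == "Processed: hello"


def test_tool_raises_on_error(test_tool):
    with pytest.raises(ValueError):
        test_tool(input="error")
```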
smolagents/tests/fixtures/tools.py/0
{ "file_path": "smolagents/tests/fixtures/tools.py", "repo_id": "smolagents", "token_count": 1787 }
285
# coding=utf-8 # Copyright 2025 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Source: https://github.com/Arize-ai/openinference/blob/main/python/instrumentation/openinference-instrumentation-smolagents/tests/openinference/instrumentation/smolagents/test_instrumentor.py from typing import Generator import pytest from .utils.markers import require_run_all # Add this at the module level to skip all tests if OpenTelemetry is not available pytest.importorskip("opentelemetry", reason="requires opentelemetry") pytest.importorskip( "openinference.instrumentation.smolagents", reason="requires openinference.instrumentation.smolagents" ) from openinference.instrumentation.smolagents import SmolagentsInstrumentor from opentelemetry import trace as trace_api from opentelemetry.sdk import trace as trace_sdk from opentelemetry.sdk.resources import Resource from opentelemetry.sdk.trace.export import SimpleSpanProcessor from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter from smolagents.models import InferenceClientModel @pytest.fixture def in_memory_span_exporter() -> InMemorySpanExporter: return InMemorySpanExporter() @pytest.fixture def tracer_provider(in_memory_span_exporter: InMemorySpanExporter) -> trace_api.TracerProvider: resource = Resource(attributes={}) tracer_provider = trace_sdk.TracerProvider(resource=resource) span_processor = SimpleSpanProcessor(span_exporter=in_memory_span_exporter) tracer_provider.add_span_processor(span_processor=span_processor) return tracer_provider @pytest.fixture(autouse=True) def instrument( tracer_provider: trace_api.TracerProvider, in_memory_span_exporter: InMemorySpanExporter, ) -> Generator[None, None, None]: SmolagentsInstrumentor().instrument(tracer_provider=tracer_provider, skip_dep_check=True) yield SmolagentsInstrumentor().uninstrument() in_memory_span_exporter.clear() @require_run_all class TestOpenTelemetry: def test_model(self, in_memory_span_exporter: InMemorySpanExporter): model = InferenceClientModel() _ = model( messages=[ { "role": "user", "content": [ { "type": "text", "text": "Who won the World Cup in 2018? Answer in one word with no punctuation.", } ], } ] ) spans = in_memory_span_exporter.get_finished_spans() assert len(spans) == 1 span = spans[0] assert span.name == "InferenceClientModel.generate" assert span.status.is_ok assert span.attributes
smolagents/tests/test_telemetry.py/0
{ "file_path": "smolagents/tests/test_telemetry.py", "repo_id": "smolagents", "token_count": 1217 }
286
# Build the image and get out the docker file:
#
# docker build -t tgi-nix-builder -f Dockerfile.nix .
# docker run --log-driver=none tgi-nix-builder | docker load

FROM nixos/nix:2.18.8 AS builder
RUN echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf
RUN nix profile install nixpkgs#cachix
RUN cachix use huggingface
WORKDIR /root
ADD . .
RUN nix build .
RUN mkdir /tmp/nix-store-closure
RUN cp -R $(nix-store -qR result/) /tmp/nix-store-closure

FROM ubuntu:24.04

WORKDIR /app

# Copy /nix/store
COPY --from=builder /tmp/nix-store-closure /nix/store
COPY --from=builder /root/result /app
RUN ldconfig
CMD ["/app/bin/text-generation-launcher"]
text-generation-inference/Dockerfile.nix/0
{ "file_path": "text-generation-inference/Dockerfile.nix", "repo_id": "text-generation-inference", "token_count": 264 }
287
from text_generation_server.layers.tensor_parallel import ( TensorParallelColumnLinear, TensorParallelRowLinear, TensorParallelEmbedding, ) from text_generation_server.layers.linear import ( get_linear, FastLinear, ) from text_generation_server.layers.speculative import SpeculativeHead # Just to add the `load` methods. from text_generation_server.layers.layernorm import load_layer_norm from text_generation_server.layers.conv import load_conv2d from text_generation_server.layers.fp8 import Fp8Linear from text_generation_server.layers.lora import ( LoraLinear, TensorParallelMultiAdapterLinear, TensorParallelAdapterRowLinear, ) __all__ = [ "get_linear", "FastLinear", "TensorParallelColumnLinear", "TensorParallelRowLinear", "TensorParallelEmbedding", "SpeculativeHead", "LoraLinear", "Fp8Linear", "TensorParallelMultiAdapterLinear", "TensorParallelAdapterRowLinear", "load_layer_norm", "load_conv2d", ]
text-generation-inference/backends/gaudi/server/text_generation_server/layers/__init__.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/__init__.py", "repo_id": "text-generation-inference", "token_count": 376 }
288
import math import numpy as np import torch import torch.nn as nn try: convert_from_uint4 = torch.ops.hpu.convert_from_uint4 except Exception as e: hpu_import_exception = e def error_raiser_hpu(*args, **kwargs): raise ValueError( f"Trying to use HPU, but could not import the HPU framework with the following error: {hpu_import_exception}" ) convert_from_uint4 = error_raiser_hpu def pack_tensor(input, bits=4): normal = input.to(torch.int32) q = torch.zeros((normal.shape[0], normal.shape[1] // 32 * bits), dtype=torch.int32) i = 0 col = 0 while col < q.shape[1]: for j in range(i, i + (32 // bits)): q[:, col] |= normal[:, j] << (bits * (j - i)) i += 32 // bits col += 1 q = q.to(torch.int32) return q class QuantLinear(nn.Module): def __init__(self, qweight, qzeros, scales, g_idx, bias, bits, groupsize): super().__init__() self.register_buffer("qweight", qweight) self.register_buffer("qzeros", qzeros) self.register_buffer("scales", scales) self.register_buffer("g_idx", g_idx) if bias is not None: self.register_buffer("bias", bias) else: self.bias = None if bits not in [4]: raise NotImplementedError("Only 4 bits are supported.") self.bits = bits self.maxq = 2**self.bits - 1 self.groupsize = groupsize self.outfeatures = qweight.shape[1] self.infeatures = qweight.shape[0] * 32 // bits self.wf = torch.tensor( list(range(0, 32, self.bits)), dtype=torch.int32 ).unsqueeze(0) self._preprocessing() def unpack_zeros_from_cuda_old_format(self): zeros = torch.bitwise_right_shift( torch.unsqueeze(self.qzeros, 2).expand(-1, -1, 32 // self.bits), self.wf.unsqueeze(0), ).to(torch.int16 if self.bits == 8 else torch.int8) zeros = zeros + 1 zeros = torch.bitwise_and(zeros, (2**self.bits) - 1).to( self.scales.dtype ) # NOTE: It appears that casting here after the `zeros = zeros + 1` is important. 
zeros = zeros.reshape(-1, zeros.shape[1] * zeros.shape[2]) return zeros def unpack_weight_from_cuda_old_format(self): weight = torch.bitwise_right_shift( torch.unsqueeze(self.qweight, 1).expand(-1, 32 // self.bits, -1), self.wf.unsqueeze(-1), ).to(torch.int16 if self.bits == 8 else torch.int8) weight = torch.bitwise_and(weight, (2**self.bits) - 1) weight = weight.reshape((weight.shape[0] * weight.shape[1], weight.shape[2])) return weight def _preprocessing(self): orig_device = self.qweight.device self.qweight = self.qweight.cpu() weight = self.unpack_weight_from_cuda_old_format() new_qweight = pack_tensor(weight) self.qweight = new_qweight.to(orig_device) # TODO: Support group indexing and remove the check columns = self.qweight.shape[0] g_idx_trivial = [i // self.groupsize for i in range(columns)] g_idx_trivial = torch.tensor( g_idx_trivial, dtype=torch.int32, device=self.g_idx.device ) sort_zeros = not (torch.equal(self.g_idx, g_idx_trivial)) self.qzeros = self.qzeros.cpu() zeros = self.unpack_zeros_from_cuda_old_format() if sort_zeros: zeros_group_1 = torch.zeros( (self.infeatures, self.outfeatures), dtype=zeros.dtype, device=zeros.device, ) scales = self.scales.cpu() scale_group_1 = torch.zeros( (self.infeatures, self.outfeatures), dtype=scales.dtype, device=scales.device, ) for i in range(self.infeatures): zeros_group_1[i] = zeros[self.g_idx[i]] scale_group_1[i] = self.scales[self.g_idx[i]] self.qzeros = pack_tensor(zeros_group_1).to(orig_device) self.scales = scale_group_1.to(orig_device) self.groupsize = 1 self.g_idx = None else: new_qzeros = pack_tensor(zeros) self.qzeros = new_qzeros.to(orig_device) @classmethod def new(cls, bits, groupsize, infeatures, outfeatures, bias): if bits not in [4]: raise NotImplementedError("Only 4 bits are supported.") qweight = torch.zeros((infeatures // 32 * bits, outfeatures), dtype=torch.int32) qzeros = torch.zeros( (math.ceil(infeatures / groupsize), outfeatures // 32 * bits), dtype=torch.int32, ) scales = torch.zeros( (math.ceil(infeatures / groupsize), outfeatures), dtype=torch.float16 ) g_idx = torch.tensor( [i // groupsize for i in range(infeatures)], dtype=torch.int32 ) if bias: bias = torch.zeros((outfeatures), dtype=torch.float16) else: bias = None return cls(qweight, qzeros, scales, g_idx, bias, bits, groupsize) def pack(self, linear, scales, zeros, g_idx=None): self.g_idx = g_idx.clone() if g_idx is not None else self.g_idx scales = scales.t().contiguous() zeros = zeros.t().contiguous() scale_zeros = zeros * scales self.scales = scales.clone().half() if linear.bias is not None: self.bias = linear.bias.clone().half() intweight = [] for idx in range(self.infeatures): intweight.append( torch.round( (linear.weight.data[:, idx] + scale_zeros[self.g_idx[idx]]) / self.scales[self.g_idx[idx]] ).to(torch.int)[:, None] ) intweight = torch.cat(intweight, dim=1) intweight = intweight.t().contiguous() intweight = intweight.numpy().astype(np.uint32) qweight = np.zeros( (intweight.shape[0] // 32 * self.bits, intweight.shape[1]), dtype=np.uint32 ) i = 0 row = 0 while row < qweight.shape[0]: if self.bits in [4]: for j in range(i, i + (32 // self.bits)): qweight[row] |= intweight[j] << (self.bits * (j - i)) i += 32 // self.bits row += 1 else: raise NotImplementedError("Only 4 bits are supported.") qweight = qweight.astype(np.int32) self.qweight = torch.from_numpy(qweight) zeros -= 1 zeros = zeros.numpy().astype(np.uint32) qzeros = np.zeros( (zeros.shape[0], zeros.shape[1] // 32 * self.bits), dtype=np.uint32 ) i = 0 col = 0 while col < qzeros.shape[1]: if 
self.bits in [4]: for j in range(i, i + (32 // self.bits)): qzeros[:, col] |= zeros[:, j] << (self.bits * (j - i)) i += 32 // self.bits col += 1 else: raise NotImplementedError("Only 4 bits are supported.") qzeros = qzeros.astype(np.int32) self.qzeros = torch.from_numpy(qzeros) def forward(self, x): out_shape = x.shape[:-1] + (self.outfeatures,) x = x.reshape(-1, x.shape[-1]) weight = convert_from_uint4(self.qweight, self.scales, self.qzeros, x.dtype) out = torch.matmul(x, weight) out = out.reshape(out_shape) out = out + self.bias if self.bias is not None else out return out
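A quick standalone check of `pack_tensor` above (a sketch assuming 4-bit packing, i.e. eight values per int32 word, lowest nibble first):

```python
import torch

values = torch.randint(0, 16, (2, 32), dtype=torch.int32)
packed = pack_tensor(values, bits=4)
print(packed.shape)  # torch.Size([2, 4]): 32 values // 8 per word = 4 columns

# Unpack the first word of row 0 and verify the round trip.
shifts = torch.arange(0, 32, 4, dtype=torch.int32)
unpacked = (packed[0, 0] >> shifts) & 0xF
assert bool((unpacked == values[0, :8]).all())
```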
text-generation-inference/backends/gaudi/server/text_generation_server/layers/gptq/hpu.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/layers/gptq/hpu.py", "repo_id": "text-generation-inference", "token_count": 3861 }
289
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional, Tuple, Type import torch import torch.distributed from torch import nn from transformers.configuration_utils import PretrainedConfig from text_generation_server.layers import ( FastLinear, SpeculativeHead, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, get_linear, ) from text_generation_server.layers.attention import ( Seqlen, attention, paged_attention, set_block_mapping, HPUPagedAttentionMetadata, ) from text_generation_server.layers.attention.kv_cache import get_kv_scales from text_generation_server.layers.layernorm import FastRMSNorm from text_generation_server.layers.moe import DenseMoELayer, MoELayer, SparseMoELayer from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.utils.weights import UnquantizedWeight import habana_frameworks.torch as htorch class MixtralConfig(PretrainedConfig): model_type = "mixtral" def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, sliding_window=None, num_experts_per_tok=2, num_local_experts=8, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def promote_scalar(x: torch.Tensor) -> torch.Tensor: return x.view(1) if len(x.size()) == 0 else x def load_attention(config, prefix: str, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", 
f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, ) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear(get_linear(weight, bias=None)) def _load_experts(config, prefix: str, mat, weights): if config.quantize is not None: raise NotImplementedError("Mixtral does not support weight quantization yet.") assert mat in ["w1", "w2", "w3"] world_size = weights.process_group.size() rank = weights.process_group.rank() assert ( config.intermediate_size % world_size == 0 ), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards" block_size = config.intermediate_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty( (config.num_local_experts * block_size, config.hidden_size), dtype=weights.dtype, device=weights.device, ) for i in range(config.num_local_experts): slice_ = weights._get_slice(f"{prefix}.{i}.{mat}.weight") if mat == "w2": expert_slice = slice_[:, start:stop].t().contiguous() else: expert_slice = slice_[start:stop] tensor[i * block_size : (i + 1) * block_size] = expert_slice.to( dtype=weights.dtype ).to(device=weights.device) return tensor class MixtralAttention(torch.nn.Module): def __init__( self, prefix: str, config, weights, rotary_emb, ): super().__init__() self.max_past = ( config.sliding_window if config.sliding_window is not None else -1 ) self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = rotary_emb self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.kv_scales = get_kv_scales(weights, f"{prefix}") self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=False, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = 
kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, dim=1, index=0), cos, sin) kv_cache.store( key=kv[:, 0], value=kv[:, 1], slots=slots, kv_scales=self.kv_scales, ) # Prefill if cu_seqlen_prefill is not None: # sdpa attn_output = attention( query=query, key=kv[:, 0], value=kv[:, 1], kv_cache=kv_cache, kv_scales=self.kv_scales, seqlen=seqlen, softmax_scale=self.softmax_scale, window_size_left=self.max_past, ) # Decode else: attn_output = paged_attention( query, kv_cache, self.kv_head_mapping, self.softmax_scale, seqlen, kv_scales=self.kv_scales, hpu_attention_meta=hpu_attention_meta, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) @torch.jit.script def select_experts(gate_logits: torch.Tensor, top_k: int): # all_probs: (sequence_length, n_experts) and upcast for softmax all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) # weights, selected_experts: (sequence_length, top-k) weights, selected_experts = torch.topk(all_probs, top_k, dim=-1) weights /= weights.sum(dim=-1, keepdim=True) weights = weights.view(-1) selected_experts = selected_experts.view(-1) return selected_experts, weights @torch.jit.script def round_up(x: torch.Tensor, value: int): return torch.div(x + (value - 1), value, rounding_mode="trunc") * value class MixtralMoE(nn.Module): def __init__( self, prefix, config: MixtralConfig, moe_layer_cls: Type[MoELayer], weights ): super().__init__() # gating self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False) self.moe = moe_layer_cls( n_expert_group=None, n_experts=config.num_local_experts, prefix=f"{prefix}.experts", renormalize=True, topk=config.num_experts_per_tok, topk_group=None, weights=weights, gate_proj_name="w1", up_proj_name="w3", down_proj_name="w2", ) assert isinstance(self.moe, MoELayer) self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: # router_logits: (num_tokens, n_experts) router_logits = self.gate(x) out = self.moe(x, gating_output=router_logits) # Reduce sum if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out.view(*x.shape) class MixtralLayer(nn.Module): def __init__(self, prefix: str, layer_id, config, weights, rotary_emb): super().__init__() prefix = f"{prefix}.layers.{layer_id}" self.self_attn = MixtralAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights, rotary_emb=rotary_emb, ) moe_layer_cls = ( SparseMoELayer if SparseMoELayer.is_supported(weights) else DenseMoELayer ) self.moe = MixtralMoE( f"{prefix}.block_sparse_moe", config, moe_layer_cls, weights ) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.post_attention_layernorm( attn_output, res ) moe_output = self.moe(normed_attn_res_output) return moe_output, attn_res class MixtralModel(torch.nn.Module): def __init__(self, prefix: str, config, weights): 
super().__init__() self.embed_tokens = TensorParallelEmbedding( prefix=( "model.embed_tokens" if not prefix else f"{prefix}.model.embed_tokens" ), weights=weights, ) rotary_emb = PositionRotaryEmbedding.static( config=config, dim=config.hidden_size // config.num_attention_heads, base=config.rope_theta, device=weights.device, ) self.layers = nn.ModuleList( [ MixtralLayer( "model" if not prefix else f"{prefix}.model", layer_id, config, weights, rotary_emb, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = FastRMSNorm.load( prefix="model.norm" if not prefix else f"{prefix}.model.norm", weights=weights, eps=config.rms_norm_eps, ) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], slots: torch.Tensor, seqlen: Seqlen, hpu_attention_meta: Optional[HPUPagedAttentionMetadata], ) -> torch.Tensor: if hpu_attention_meta is not None: hpu_attention_meta = set_block_mapping( hpu_attention_meta, input_ids.shape[0] ) hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin(position_ids) residual = None lazy_mode = htorch.utils.internal.is_lazy() if lazy_mode: htorch.core.mark_step() for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], slots, seqlen, hpu_attention_meta, ) if lazy_mode: htorch.core.mark_step() hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashMixtralForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights): super().__init__() self.model = MixtralModel(prefix, config, weights) self.lm_head = SpeculativeHead.load( config, prefix="lm_head" if not prefix else f"{prefix}.lm_head", weights=weights, ) self.max_past = config.sliding_window self.max_past_tensor = ( torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], slots: torch.Tensor, seqlen: Seqlen, hpu_attention_meta: Optional[HPUPagedAttentionMetadata], lm_head_indices: Optional[torch.Tensor] = None, adapter_data: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, slots, seqlen, hpu_attention_meta, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits
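A weight-free sanity check of the `select_experts` routing helper defined above (toy logits: 2 tokens, 4 experts, top-2):

```python
import torch

gate_logits = torch.tensor([[0.1, 2.0, 0.3, 1.5],
                            [1.0, 0.2, 3.0, 0.1]])
selected, weights = select_experts(gate_logits, 2)
print(selected)  # tensor([1, 3, 2, 0]): flattened top-2 expert ids per token
print(weights)   # flattened weights, renormalized to sum to 1 per token
```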
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py", "repo_id": "text-generation-inference", "token_count": 8403 }
290
# coding=utf-8 # Copyright 2024 the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Qwen2 VL model.""" from typing import Optional, Tuple, List import torch import torch.utils.checkpoint from torch import nn from habana_frameworks.torch.hpex.kernels import FusedSDPA from vllm_hpu_extension.utils import ModuleFusedSDPA import numpy as np from transformers.activations import ACT2FN import torch.nn.functional as F from text_generation_server.layers.layernorm import FastLayerNorm, FastRMSNorm from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelRowLinear, TensorParallelEmbedding, SpeculativeHead, ) from text_generation_server.layers.attention import ( Seqlen, HPUPagedAttentionMetadata, ) from text_generation_server.models.custom_modeling.flash_qwen2_modeling import ( Qwen2Model, ) from habana_frameworks.torch.hpex.kernels import ( RotaryPosEmbeddingMode, apply_rotary_pos_emb, ) import habana_frameworks.torch as htorch class Qwen2VLAttention(nn.Module): def __init__(self, *, prefix, config, weights): super().__init__() self.embed_dim = config.embed_dim // weights.process_group.size() self.head_dim = config.hidden_size // config.num_heads self.num_heads = config.num_heads // weights.process_group.size() self.qkv = TensorParallelColumnLinear.load_qkv( config, prefix=f"{prefix}.qkv", weights=weights, bias=False, num_heads=self.num_heads, num_key_value_heads=self.num_heads, ) self.qkv.linear.bias = weights.get_sharded(f"{prefix}.qkv.bias", dim=0) self.proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.proj", weights=weights, bias=True, ) self.softmax_scale = 1.0 / np.sqrt(self.embed_dim // self.num_heads) def forward( self, hidden_state: torch.Tensor, cu_seqlens: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, max_seqlen: int, ) -> torch.Tensor: # apply the qkv linear layer to the hidden state qkv = self.qkv(hidden_state) query, key, value = qkv.split( [self.embed_dim, self.embed_dim, self.embed_dim], dim=1 ) # reshape the query, key, and value tensors _shape = ( hidden_state.shape[0], self.num_heads, self.embed_dim // self.num_heads, ) query = query.view(*_shape) key = key.view(*_shape) value = value.view(*_shape) # apply rotary positional embeddings rope_mode = RotaryPosEmbeddingMode.BLOCKWISE rotary_dim = cos.shape[-1] query_rot = query[..., :rotary_dim] query_pass = query[..., rotary_dim:] query_rot = apply_rotary_pos_emb(query_rot, cos, sin, None, 0, rope_mode) query.copy_(torch.cat((query_rot, query_pass), dim=-1).reshape(query.shape)) key_rot = key[..., :rotary_dim] key_pass = key[..., rotary_dim:] key_rot = apply_rotary_pos_emb(key_rot, cos, sin, None, 0, rope_mode) key.copy_(torch.cat((key_rot, key_pass), dim=-1).reshape(key.shape)) # execute sdpa causal = False query = query.transpose(0, 1) key = key.transpose(0, 1) value = value.transpose(0, 1) fsdpa_op = ModuleFusedSDPA(FusedSDPA) attention_mask = torch.zeros( [1, max_seqlen, max_seqlen], device=query.device, dtype=torch.bool ) for i in range(1, 
len(cu_seqlens)): attention_mask[ :, cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i] ] = True attn_output = fsdpa_op( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=causal, scale=None, softmax_mode="None", recompute_mode=None, valid_sequence_lengths=None, ) attn_output = attn_output.transpose(0, 1) # reshape output to original dimensions attn_output = attn_output.reshape(hidden_state.shape[0], -1) attn_output = self.proj(attn_output) return attn_output class Qwen2VLVisionMLP(nn.Module): def __init__(self, *, prefix, config, weights): super().__init__() self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = TensorParallelColumnLinear.load( prefix=f"{prefix}.fc1", weights=weights, config=config, bias=True ) self.fc2 = TensorParallelRowLinear.load( prefix=f"{prefix}.fc2", weights=weights, config=config, bias=True ) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Qwen2VLVisionBlock(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.attn = Qwen2VLAttention( prefix=f"{prefix}.attn", config=config, weights=weights, ) self.norm1 = FastLayerNorm.load( prefix=f"{prefix}.norm1", weights=weights, eps=1e-6, ) self.norm2 = FastLayerNorm.load( prefix=f"{prefix}.norm2", weights=weights, eps=1e-6, ) self.mlp = Qwen2VLVisionMLP( prefix=f"{prefix}.mlp", config=config, weights=weights, ) def forward(self, hidden_states, cu_seqlens, cos, sin, max_seqlen) -> torch.Tensor: norm1_out, residual = self.norm1(hidden_states) attn_out = self.attn(norm1_out, cu_seqlens, cos, sin, max_seqlen) hidden_states = attn_out + residual norm2_out, residual = self.norm2(hidden_states) hidden_states = hidden_states + self.mlp(norm2_out) return hidden_states class Qwen2VLPatchMerger(nn.Module): def __init__(self, *, prefix, config, weights): super().__init__() self.hidden_size = config.embed_dim * (config.spatial_merge_size**2) self.patch_merger_ln_q = FastLayerNorm.load( prefix=f"{prefix}.ln_q", weights=weights, eps=1e-6, ) self.fc1 = TensorParallelColumnLinear.load( prefix=f"{prefix}.mlp.0", weights=weights, config=config, bias=True ) self.fc2 = TensorParallelRowLinear.load( prefix=f"{prefix}.mlp.2", weights=weights, config=config, bias=True ) def forward(self, hidden_states) -> torch.Tensor: hidden_states, _ = self.patch_merger_ln_q(hidden_states) hidden_states = hidden_states.view(-1, self.hidden_size) hidden_states = self.fc1(hidden_states) hidden_states = F.gelu(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class Qwen2VisionModel(nn.Module): def __init__(self, *, prefix, config, weights): super().__init__() self.spatial_merge_size = config.spatial_merge_size kernel_size = [config.temporal_patch_size, config.patch_size, config.patch_size] self.patch_embedding = nn.Conv3d( in_channels=config.in_chans, out_channels=config.embed_dim, kernel_size=kernel_size, stride=kernel_size, bias=False, ) self.patch_embedding.weight = nn.Parameter( weights.get_tensor(f"{prefix}.patch_embed.proj.weight"), requires_grad=False ) head_dim = config.embed_dim // config.num_heads # TODO: replace with static positional embeddings once implemented theta = 10000.0 dim = head_dim // 2 inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2, dtype=torch.float) / dim)) self.register_buffer("inv_freq", inv_freq, persistent=False) self.blocks = nn.ModuleList( [ Qwen2VLVisionBlock( 
prefix=f"{prefix}.blocks.{i}", config=config, weights=weights, ) for i in range(config.depth) ] ) self.merger = Qwen2VLPatchMerger( prefix=f"{prefix}.merger", config=config, weights=weights, ) self.temporal_patch_size = config.temporal_patch_size self.spatial_patch_size = config.spatial_patch_size self.in_channels = config.in_channels self.embed_dim = config.embed_dim def apply_class_embedding(self, hidden_state: torch.Tensor) -> torch.Tensor: batch_size, _, hidden_size = hidden_state.shape class_embedding = self.class_embedding.expand(batch_size, 1, hidden_size) hidden_state = torch.cat([class_embedding, hidden_state], dim=1) return hidden_state def forward( self, pixel_values: torch.Tensor, grid_thw: Optional[torch.LongTensor] = None, ) -> torch.Tensor: # reshape the input tensor for processing shape = ( -1, self.in_channels, self.temporal_patch_size, self.spatial_patch_size, self.spatial_patch_size, ) pixel_values = pixel_values.view(shape).to(self.patch_embedding.weight.dtype) hidden_states = self.patch_embedding(pixel_values).view(-1, self.embed_dim) # TODO: revisit to see if we can avoid some of these reshapes # find the position ids for the input tensor based on the grid_thw pos_ids = [] for t, h, w in grid_thw: hpos_ids = torch.arange(h).unsqueeze(1).expand(-1, w) hpos_ids = hpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) hpos_ids = hpos_ids.permute(0, 2, 1, 3) hpos_ids = hpos_ids.flatten() wpos_ids = torch.arange(w).unsqueeze(0).expand(h, -1) wpos_ids = wpos_ids.reshape( h // self.spatial_merge_size, self.spatial_merge_size, w // self.spatial_merge_size, self.spatial_merge_size, ) wpos_ids = wpos_ids.permute(0, 2, 1, 3) wpos_ids = wpos_ids.flatten() pos_ids.append(torch.stack([hpos_ids, wpos_ids], dim=-1).repeat(t, 1)) pos_ids = torch.cat(pos_ids, dim=0) max_grid_size = grid_thw[:, 1:].max() # apply the positional embeddings to the position ids seq = torch.arange( max_grid_size, device=self.inv_freq.device, dtype=self.inv_freq.dtype ) rotary_pos_emb_full = torch.outer(seq, self.inv_freq) rotary_pos_emb = rotary_pos_emb_full[pos_ids].flatten(1) rotary_pos_emb = rotary_pos_emb.to(hidden_states.device, hidden_states.dtype) cos = rotary_pos_emb.cos() sin = rotary_pos_emb.sin() cos = torch.cat((cos, cos), dim=-1).unsqueeze(1) sin = torch.cat((sin, sin), dim=-1).unsqueeze(1) # create a cu_seqlens tensor to be used in the attention mask cu_seqlens = torch.repeat_interleave( grid_thw[:, 1] * grid_thw[:, 2], grid_thw[:, 0] ).cumsum(dim=0, dtype=torch.int32) cu_seqlens = F.pad(cu_seqlens, (1, 0), value=0) max_seqlen = torch.max(cu_seqlens[1:] - cu_seqlens[:-1]) # iterately apply the blocks to the hidden states lazy_mode = htorch.utils.internal.is_lazy() if lazy_mode: htorch.core.mark_step() for block in self.blocks: hidden_states = block(hidden_states, cu_seqlens, cos, sin, max_seqlen) if lazy_mode: htorch.core.mark_step() # apply the final patch merger to the hidden states hidden_states = self.merger(hidden_states) return hidden_states class Qwen2VLForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config config.vision_config.quantize = None config.vision_config.speculator = config.speculator # set rope_scaling.type == "mrope" since AutoConfig.from_pretrained incorrectly # returns rope_scaling.type == "default" for Qwen2-VL model at the moment if ( hasattr(config, "rope_scaling") and config.rope_scaling is not None and 
config.rope_scaling.get("type", None) == "default" ): config.rope_scaling.update({"rope_type": "mrope"}) self.hidden_size = config.hidden_size self.vision_start_token_id = config.vision_start_token_id self.vision_end_token_id = config.vision_end_token_id self.image_token_id = config.image_token_id self.video_token_id = config.video_token_id self.spatial_merge_size = config.vision_config.spatial_merge_size self.embed_tokens = TensorParallelEmbedding( prefix="model.embed_tokens", weights=weights ) self.visual = Qwen2VisionModel( prefix="visual", config=config.vision_config, weights=weights ) self.text_model = Qwen2Model(prefix=None, config=config, weights=weights) if config.tie_word_embeddings: suffix = "model.embed_tokens" else: suffix = "lm_head" self.lm_head = SpeculativeHead.load( config, prefix=suffix if not prefix else f"{prefix}.{suffix}", weights=weights, ) self.norm = FastRMSNorm.load( prefix="model.norm", weights=weights, eps=config.rms_norm_eps, ) self.device = weights.device # based on https://github.com/huggingface/transformers/blob/e284c7e954abe12c34b50461c17f8115a0afe115/src/transformers/models/qwen2_vl/modeling_qwen2_vl.py#L1391 # modified to first find segments then initialize position ids for each segment # Steps: # locate all vision and text segments # calculate `vision_segment_lengths` for each vision segment to be used as offset # calculate `text_segment_lengths` for each text segment to be used as offset # create position ids for each vision segment based on the image grid # create position ids for each text segment # combine all the position ids # the final segment is the difference between the last vision segment and the end of the input # combine all the position ids and reshape to (3, input_ids_len) then swap dimensions to (input_ids_len, 3) def get_position_ids( self, input_ids: torch.Tensor, image_grid_thw: Optional[torch.Tensor] = None, ) -> torch.Tensor: if image_grid_thw is None: return ( torch.arange(input_ids.shape[0], device=input_ids.device) .unsqueeze(1) .repeat(1, 3) ) spatial_merge_size = self.spatial_merge_size vision_start_token_id = self.vision_start_token_id vision_end_token_id = self.vision_end_token_id device = input_ids.device dtype = input_ids.dtype input_ids_len = input_ids.shape[0] vision_starts = torch.where(input_ids == vision_start_token_id)[0] vision_ends = torch.where(input_ids == vision_end_token_id)[0] vision_segments = torch.stack((vision_starts, vision_ends), dim=1) prev_vision_end = torch.cat( [torch.zeros(1, device=vision_ends.device, dtype=dtype), vision_ends[:-1]] ) text_lengths_between_vision = vision_segments[:, 0] - prev_vision_end + 1 vision_widths_max = torch.cat( [ torch.zeros(1, device=image_grid_thw.device, dtype=dtype), image_grid_thw[:-1, 2] // spatial_merge_size, ] ) vision_segment_lengths = vision_widths_max + text_lengths_between_vision vision_segment_lengths = vision_segment_lengths.cumsum(dim=0) text_segment_lengths = vision_segment_lengths - text_lengths_between_vision # create position ids for each vision segment based on the image grid llm_pos_ids_list = [] for i, _ in enumerate(vision_segments): t, h, w = ( image_grid_thw[i][0], image_grid_thw[i][1] // spatial_merge_size, image_grid_thw[i][2] // spatial_merge_size, ) t_indices = torch.arange(t, device=device).repeat_interleave(h * w) h_indices = torch.arange(h, device=device).repeat_interleave(w).repeat(t) w_indices = torch.arange(w, device=device).repeat(t * h) image_position_ids = torch.stack([t_indices, h_indices, w_indices], dim=0) # offset by the position of
the last vision segment im = image_position_ids + vision_segment_lengths[i] llm_pos_ids_list.append(im) # create position ids for each text segment text_ranges = [ torch.arange(seq_len, device=device).view(1, -1).expand(3, -1) + text_segment_lengths[i] for i, seq_len in enumerate(text_lengths_between_vision) ] full_llm_pos_ids_list = [ item for sublist in zip(text_ranges, llm_pos_ids_list) for item in sublist ] max_s = full_llm_pos_ids_list[-1].max() + 1 final_text_len = input_ids_len - vision_ends[-1] if final_text_len > 0: m = torch.arange(final_text_len, device=device).view(1, -1).expand(3, -1) full_llm_pos_ids_list.append(m + max_s) position_ids = ( torch.cat(full_llm_pos_ids_list, dim=1).reshape(3, -1).transpose(0, 1) ) return position_ids def get_vision_embeds( self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.FloatTensor] = None, image_sizes: Optional[torch.Tensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, ): image_embeds = self.visual(pixel_values, grid_thw=image_grid_thw).squeeze(0) return image_embeds def get_inputs_embeds( self, input_ids: torch.Tensor, vision_embeds: torch.Tensor = None, ): inputs_embeds = self.embed_tokens(input_ids) # apply the visual model to the pixel values if they are provided if vision_embeds is not None: mask = torch.where(input_ids == self.image_token_id) inputs_embeds[mask] = vision_embeds return inputs_embeds def forward( self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], slots: torch.Tensor, seqlen: Seqlen, hpu_attention_meta: Optional[HPUPagedAttentionMetadata], lm_head_indices: Optional[torch.Tensor], attention_mask: Optional[torch.BoolTensor] = None, adapter_data: Optional[torch.Tensor] = None, image_indices=None, ): hidden_states = self.text_model( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, slots=slots, seqlen=seqlen, hpu_attention_meta=hpu_attention_meta, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.lm_head(hidden_states) return logits, speculative_logits
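# --- Illustrative sketch (added for exposition; not part of the upstream module) ---
# The vision attention above restricts patches to attend only within their own
# image by carving a block-diagonal boolean mask out of `cu_seqlens`. The
# self-contained toy below reproduces that masking logic in isolation; the two
# sequence lengths are made up.
def _demo_block_diagonal_mask():
    import torch

    # Two images contributing 3 and 2 patches -> cumulative boundaries [0, 3, 5].
    cu_seqlens = torch.tensor([0, 3, 5], dtype=torch.int32)
    total = int(cu_seqlens[-1])
    mask = torch.zeros(1, total, total, dtype=torch.bool)
    for i in range(1, len(cu_seqlens)):
        # A patch may only attend to patches belonging to the same image.
        mask[:, cu_seqlens[i - 1] : cu_seqlens[i], cu_seqlens[i - 1] : cu_seqlens[i]] = True
    return mask


if __name__ == "__main__":
    # Prints a 5x5 matrix with a 3x3 and a 2x2 block of ones on the diagonal.
    print(_demo_block_diagonal_mask().int())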
text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/models/custom_modeling/qwen2_vl.py", "repo_id": "text-generation-inference", "token_count": 9718 }
291
import datetime import torch import os from loguru import logger from pathlib import Path from safetensors.torch import save_file, load_file, _find_shared_tensors, _is_complete from typing import List, Dict from collections import defaultdict def _remove_duplicate_names( state_dict: Dict[str, torch.Tensor], *, preferred_names: List[str] = None, discard_names: List[str] = None, ) -> Dict[str, List[str]]: if preferred_names is None: preferred_names = [] preferred_names = set(preferred_names) if discard_names is None: discard_names = [] discard_names = set(discard_names) shareds = _find_shared_tensors(state_dict) to_remove = defaultdict(list) for shared in shareds: complete_names = set( [name for name in shared if _is_complete(state_dict[name])] ) if not complete_names: if len(shared) == 1: # Force contiguous name = list(shared)[0] state_dict[name] = state_dict[name].clone() complete_names = {name} else: raise RuntimeError( f"Error while trying to find names to remove to save state dict, but found no suitable name to keep for saving amongst: {shared}. None is covering the entire storage. Refusing to save/load the model since you could be storing much more memory than needed. Please refer to https://huggingface.co/docs/safetensors/torch_shared_tensors for more information. Or open an issue." ) keep_name = sorted(list(complete_names))[0] # Mechanism to preferentially select keys to keep # coming from the on-disk file to allow # loading models saved with a different choice # of keep_name preferred = complete_names.difference(discard_names) if preferred: keep_name = sorted(list(preferred))[0] if preferred_names: preferred = preferred_names.intersection(complete_names) if preferred: keep_name = sorted(list(preferred))[0] for name in sorted(shared): if name != keep_name: to_remove[keep_name].append(name) return to_remove def convert_file(pt_file: Path, sf_file: Path, discard_names: List[str]): """ Convert a pytorch file to a safetensors file This will remove duplicate tensors from the file. Unfortunately, this might not respect *transformers* convention. Forcing us to check for potentially different keys during load when looking for specific tensors (making tensor sharing explicit). 
""" loaded = torch.load(pt_file, map_location="cpu", weights_only=True) if "state_dict" in loaded: loaded = loaded["state_dict"] to_removes = _remove_duplicate_names(loaded, discard_names=discard_names) metadata = {"format": "pt"} for kept_name, to_remove_group in to_removes.items(): for to_remove in to_remove_group: if to_remove not in metadata: metadata[to_remove] = kept_name del loaded[to_remove] # Force tensors to be contiguous loaded = {k: v.contiguous() for k, v in loaded.items()} dirname = os.path.dirname(sf_file) os.makedirs(dirname, exist_ok=True) save_file(loaded, sf_file, metadata=metadata) reloaded = load_file(sf_file) for k in loaded: pt_tensor = loaded[k] sf_tensor = reloaded[k] if not torch.equal(pt_tensor, sf_tensor): raise RuntimeError(f"The output tensors do not match for key {k}") def convert_files(pt_files: List[Path], sf_files: List[Path], discard_names: List[str]): assert len(pt_files) == len(sf_files) N = len(pt_files) # We do this instead of using tqdm because we want to parse the logs with the launcher for i, (pt_file, sf_file) in enumerate(zip(pt_files, sf_files)): # Skip blacklisted files if ( "arguments" in pt_file.name or "args" in pt_file.name or "training" in pt_file.name ): continue start = datetime.datetime.now() convert_file(pt_file, sf_file, discard_names) elapsed = datetime.datetime.now() - start logger.info(f"Convert: [{i + 1}/{N}] -- Took: {elapsed}")
text-generation-inference/backends/gaudi/server/text_generation_server/utils/convert.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/convert.py", "repo_id": "text-generation-inference", "token_count": 1775 }
292
# Copyright (C) 2024 Habana Labs, Ltd. an Intel Company. import re from typing import List, Optional, Tuple, Set, Union import torch from text_generation_server.pb import generate_pb2 from text_generation_server.pb.generate_pb2 import FinishReason, GrammarType from text_generation_server.utils.logits_process import ( FrequencyPenaltyLogitsProcessor, GrammarLogitProcessor, HeterogeneousProcessorWrapper, HeterogeneousRepetitionPenaltyLogitsProcessor, HeterogeneousFrequencyPenaltyLogitsProcessor, HeterogeneousTemperatureLogitsWarper, HeterogeneousTopKLogitsWarper, HeterogeneousTopPLogitsWarper, HeterogeneousTypicalLogitsWarper, HeterogeneousGrammarLogitProcessor, static_warper, ) from text_generation_server.utils.watermark import WatermarkLogitsProcessor from transformers import PreTrainedTokenizerBase, RepetitionPenaltyLogitsProcessor import os class NextTokenChooser: def __init__( self, watermark: bool = False, temperature: float = 1.0, repetition_penalty: float = 1.0, frequency_penalty: float = 0.0, top_k: Optional[int] = None, top_p: Optional[float] = None, typical_p: Optional[float] = None, do_sample: bool = False, seed: int = 0, device: str = "cpu", tokenizer: Optional[PreTrainedTokenizerBase] = None, grammar: str = "", grammar_type: GrammarType = GrammarType.GRAMMAR_TYPE_NONE, fsm_grammar_state: int = 0, ): self.watermark_processor = ( WatermarkLogitsProcessor(device=device) if watermark else None ) self.repetition_processor = ( RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty) if repetition_penalty and repetition_penalty != 1.0 else None ) self.frequency_processor = ( FrequencyPenaltyLogitsProcessor(penalty=frequency_penalty) if frequency_penalty and frequency_penalty != 0.0 else None ) self.grammar_processor = ( GrammarLogitProcessor(tokenizer, device, grammar, grammar_type) if grammar != "" else None ) self.tokenizer = tokenizer has_warpers = ( (temperature is not None and temperature != 1.0) or (top_k is not None and top_k != 0) or (top_p is not None and top_p < 1.0) or (typical_p is not None and typical_p < 1.0) ) if has_warpers: self.static_warper = static_warper( temperature=temperature, top_k=top_k, top_p=top_p, typical_p=typical_p ) else: self.static_warper = None sampling = do_sample or has_warpers self.choice = Sampling(seed, device) if sampling else Greedy() self.fsm_grammar_state = fsm_grammar_state self.grammar = grammar def __call__(self, input_ids, scores): if self.watermark_processor is not None: scores = self.watermark_processor(input_ids, scores) if self.repetition_processor is not None: scores = self.repetition_processor(input_ids, scores) if self.frequency_processor is not None: scores = self.frequency_processor(input_ids, scores) if self.grammar_processor is not None: scores = self.grammar_processor(scores, self.fsm_grammar_state) if self.static_warper is None: next_logprob = torch.log_softmax(scores, -1) else: scores, next_logprob = self.static_warper(scores) next_id = self.choice(scores[-1]).view(1, 1) return next_id, next_logprob def advance_grammar(self, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_state = self.grammar_processor.advance( next_id, self.fsm_grammar_state ) return self @classmethod def from_pb( cls, pb: generate_pb2.NextTokenChooserParameters, device: torch.device, tokenizer: PreTrainedTokenizerBase, ) -> "NextTokenChooser": return NextTokenChooser( watermark=pb.watermark, temperature=pb.temperature, repetition_penalty=pb.repetition_penalty, frequency_penalty=pb.frequency_penalty, top_k=pb.top_k, 
top_p=pb.top_p, typical_p=pb.typical_p, do_sample=pb.do_sample, seed=pb.seed, device=device, tokenizer=tokenizer, grammar=pb.grammar, grammar_type=pb.grammar_type, ) class StopSequenceCriteria: def __init__(self, stop_sequence: str): stop_sequence = re.escape(stop_sequence) self.regex = re.compile(f"{stop_sequence}$") def __call__(self, output: str) -> bool: if self.regex.findall(output): return True return False class StoppingCriteria: def __init__( self, eos_token_ids: Optional[Union[Set[int], int]], stop_sequence_criterias: List[StopSequenceCriteria], max_new_tokens: int = 20, ignore_eos_token: bool = False, ): if eos_token_ids is None: eos_token_ids = set() elif isinstance(eos_token_ids, int): eos_token_ids = set([eos_token_ids]) elif isinstance(eos_token_ids, set): eos_token_ids = eos_token_ids else: raise RuntimeError( f"eos_token_ids is of invalid type {type(eos_token_ids)}, expected int, None or set[int]" ) self.eos_token_ids = eos_token_ids self.stop_sequence_criterias = stop_sequence_criterias self.max_new_tokens = max_new_tokens self.current_tokens = 0 self.current_output = "" if os.getenv("TEXT_GENERATION_SERVER_IGNORE_EOS_TOKEN", "false") == "true": self.ignore_eos_token = True else: self.ignore_eos_token = ignore_eos_token def __call__(self, last_token: int, last_output: str) -> Tuple[bool, Optional[str]]: self.current_tokens += 1 if self.current_tokens >= self.max_new_tokens: return True, FinishReason.FINISH_REASON_LENGTH if isinstance(last_token, torch.Tensor): last_token = last_token.item() if not self.ignore_eos_token and last_token in self.eos_token_ids: return True, FinishReason.FINISH_REASON_EOS_TOKEN if self.stop_sequence_criterias: self.current_output += last_output # There is no need to keep an output that is too long if len(self.current_output) > 300: # Slice to -200 to avoid doing it all the time self.current_output = self.current_output[-200:] for stop_sequence_criteria in self.stop_sequence_criterias: if stop_sequence_criteria(self.current_output): return True, FinishReason.FINISH_REASON_STOP_SEQUENCE return False, None @classmethod def from_pb( cls, pb: generate_pb2.StoppingCriteriaParameters, tokenizer: PreTrainedTokenizerBase, ) -> "StoppingCriteria": stop_sequence_criterias = [ StopSequenceCriteria(sequence) for sequence in pb.stop_sequences ] # TODO Hack because eos_token_id cannot be what we want. eos_token_id = getattr(tokenizer, "_eos_token_ids", tokenizer.eos_token_id) return StoppingCriteria( eos_token_id, stop_sequence_criterias, pb.max_new_tokens, pb.ignore_eos_token, ) def create_n_gram_speculation( input_ids: torch.Tensor, next_ids: torch.Tensor, accepted_ids: torch.Tensor, speculate: int, verbose: bool, ): # Very trivial approach, find first match in the string. # This is much less refined than actual n-gram but seems to work # relatively OK in grounded mode and is by far much faster with # much less worst case complexity as everything happens on device. 
B = accepted_ids.shape[0] device = input_ids.device seeds = next_ids[accepted_ids.cumsum(dim=-1) - 1] indices = (input_ids == seeds.unsqueeze(-1)).max(dim=1).indices + 1 all_indices = indices.unsqueeze(-1).expand(B, speculate) + torch.arange( speculate, device=device ) all_indices = torch.clamp(all_indices, max=input_ids.shape[1] - 1) speculative_ids = input_ids.gather(dim=-1, index=all_indices) return speculative_ids class HeterogeneousNextTokenChooser: def __init__( self, dtype: torch.dtype, device: torch.device, watermark: List[bool], temperature: List[float], repetition_penalty: List[float], frequency_penalty: List[float], top_k: List[int], top_p: List[float], typical_p: List[float], do_sample: List[bool], seeds: List[int], tokenizer: PreTrainedTokenizerBase, grammars: List[str], grammar_types: List[int], fsm_grammar_states: List[int], quantization_enabled: bool, ): warpers = [] # TODO: enable watermark with FP8 quantization self.watermark_processor = ( HeterogeneousProcessorWrapper( { i: WatermarkLogitsProcessor(device=device) for i, do_watermark in enumerate(watermark) if do_watermark } ) if any(watermark) and not quantization_enabled else None ) self.repetition_processor = ( HeterogeneousRepetitionPenaltyLogitsProcessor( repetition_penalty, dtype, device ) if any([x != 1.0 for x in repetition_penalty]) else None ) self.frequency_processor = ( HeterogeneousFrequencyPenaltyLogitsProcessor( frequency_penalty, dtype, device ) if any([x != 0.0 for x in frequency_penalty]) else None ) self.grammar_processor = ( HeterogeneousGrammarLogitProcessor( tokenizer, device, grammars, grammar_types ) if any([grammar != "" for grammar in grammars]) else None ) if any(x != 1.0 for x in temperature): do_sample = [ sample or x != 1.0 for x, sample in zip(temperature, do_sample) ] warpers.append( HeterogeneousTemperatureLogitsWarper(temperature, dtype, device) ) if any(x != 0 for x in top_k): do_sample = [sample or x != 0 for x, sample in zip(top_k, do_sample)] warpers.append(HeterogeneousTopKLogitsWarper(top_k, device)) if any(x < 1.0 for x in top_p): do_sample = [sample or x < 1.0 for x, sample in zip(top_p, do_sample)] warpers.append(HeterogeneousTopPLogitsWarper(top_p, dtype, device)) if any(x < 1.0 for x in typical_p): do_sample = [sample or x < 1.0 for x, sample in zip(typical_p, do_sample)] warpers.append(HeterogeneousTypicalLogitsWarper(typical_p, dtype, device)) self.warpers = warpers if any(do_sample): self.choice = HeterogeneousSampling(do_sample, seeds, device) else: self.choice = Greedy() self.seeds = seeds self.do_sample = do_sample self.dtype = dtype self.device = device self.tokenizer = tokenizer self.fsm_grammar_states = fsm_grammar_states self.grammars = grammars self.grammar_types = grammar_types def __call__( self, input_ids: torch.Tensor, scores: torch.Tensor, speculate: int, speculated_ids: Optional[torch.Tensor] = None, speculative_scores: Optional[torch.Tensor] = None, verbose=False, ): if speculated_ids is not None: B = scores.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 scores = scores.view(B, S, -1) else: B = scores.shape[0] S = 1 scores = scores.view(B, S, -1) next_ids = torch.zeros((B, S), device=scores.device, dtype=torch.long) for j in range(S): _scores = scores[:, j] if self.watermark_processor is not None: _scores = self.watermark_processor(input_ids, _scores) if self.repetition_processor is not None: _scores = self.repetition_processor(input_ids, _scores) if self.frequency_processor is not None: _scores = self.frequency_processor(input_ids, 
_scores) if self.grammar_processor is not None: _scores = self.grammar_processor(_scores, self.fsm_grammar_states) for warper in self.warpers: _scores = warper(input_ids, _scores) _next_ids = self.choice(_scores) scores[:, j] = _scores next_ids[:, j] = _next_ids next_ids = next_ids.view(B * S) allscores = scores.view(B * S, -1) alllogprobs = torch.log_softmax(allscores, -1) if speculated_ids is not None: accepted_ids = [] B = next_ids.shape[0] // (speculated_ids.shape[1] + 1) S = speculated_ids.shape[1] + 1 indices = [] for i in range(B): _next_ids = next_ids[i * S : (i + 1) * S] _speculated_ids = speculated_ids[i] validate_speculative = _next_ids[:-1] == _speculated_ids index = i * S accepted = 1 # First is always valid indices.append(index) for valid in validate_speculative.tolist(): if valid: index += 1 accepted += 1 indices.append(index) else: break accepted_ids.append(accepted) accepted_ids = torch.tensor( accepted_ids, device=input_ids.device, dtype=input_ids.dtype ) next_ids = next_ids[indices] logprobs = alllogprobs[indices] indices = torch.arange(B, device=input_ids.device) * S if speculative_scores is not None: speculative_scores = speculative_scores[indices + accepted_ids - 1] else: accepted_ids = torch.ones_like(next_ids) logprobs = alllogprobs next_logprobs = torch.gather(logprobs, 1, next_ids.view(-1, 1)).view(-1) if speculate > 0: if speculative_scores is not None: # Medusa provided some scores speculative_ids = Greedy()(speculative_scores) else: # n-gram speculative_ids = create_n_gram_speculation( input_ids, next_ids, accepted_ids, speculate, verbose ) else: speculative_ids = None return next_ids, next_logprobs, alllogprobs, accepted_ids, speculative_ids def advance_grammar(self, next_ids: List[int]): if self.grammar_processor is not None: other_new_states = self.grammar_processor.advance_batch( next_ids, self.fsm_grammar_states ) self.fsm_grammar_states = other_new_states return self def advance_grammar_single(self, grammar_state_index: int, next_id: int): if self.grammar_processor is not None: self.fsm_grammar_states[grammar_state_index] = ( self.grammar_processor.advance_at_index( next_id, self.fsm_grammar_states[grammar_state_index], grammar_state_index, ) ) return self def advance_grammar_single_with_past_state( self, grammar_state_index: int, next_id: torch.Tensor, past_state: int ): if self.grammar_processor is not None: next_id = next_id.item() self.fsm_grammar_states[grammar_state_index] = ( self.grammar_processor.advance_at_index( next_id, past_state, grammar_state_index, ) ) return self def filter(self, indices): if self.watermark_processor is not None: self.watermark_processor = self.watermark_processor.filter(indices) if self.repetition_processor is not None: self.repetition_processor = self.repetition_processor.filter(indices) if self.frequency_processor is not None: self.frequency_processor = self.frequency_processor.filter(indices) if self.grammar_processor is not None: self.grammar_processor = self.grammar_processor.filter(indices) filtered_warpers = [] for warper in self.warpers: filtered_warper = warper.filter(indices) if filtered_warper is not None: filtered_warpers.append(filtered_warper) self.warpers = filtered_warpers self.seeds = [self.seeds[i] for i in indices] self.do_sample = [self.do_sample[i] for i in indices] new_grammars = [] new_fsm_grammar_states = [] new_grammar_types = [] for i in indices: new_grammars.append(self.grammars[i]) new_fsm_grammar_states.append(self.fsm_grammar_states[i]) new_grammar_types.append(self.grammar_types[i]) 
self.grammars = new_grammars self.fsm_grammar_states = new_fsm_grammar_states self.grammar_types = new_grammar_types if any(self.do_sample): self.choice.filter(indices) else: self.choice = Greedy() return self @classmethod def from_pb( cls, pb: List[generate_pb2.NextTokenChooserParameters], dtype: torch.dtype, device: torch.device, tokenizer: PreTrainedTokenizerBase, fsm_grammar_states: Optional[List[int]] = None, quantization_enabled: bool = False, ) -> "HeterogeneousNextTokenChooser": return HeterogeneousNextTokenChooser( watermark=[pb_.watermark for pb_ in pb], temperature=[pb_.temperature for pb_ in pb], repetition_penalty=[pb_.repetition_penalty for pb_ in pb], frequency_penalty=[pb_.frequency_penalty for pb_ in pb], top_k=[pb_.top_k for pb_ in pb], top_p=[pb_.top_p for pb_ in pb], typical_p=[pb_.typical_p for pb_ in pb], do_sample=[pb_.do_sample for pb_ in pb], seeds=[pb_.seed for pb_ in pb], device=device, dtype=dtype, tokenizer=tokenizer, grammars=[pb_.grammar for pb_ in pb], grammar_types=[pb_.grammar_type for pb_ in pb], fsm_grammar_states=( fsm_grammar_states if fsm_grammar_states else [0] * len(pb) ), quantization_enabled=quantization_enabled, ) def pad_next_token_chooser_parameters( parameters: List[generate_pb2.NextTokenChooserParameters], expected_size: int, ) -> List[generate_pb2.NextTokenChooserParameters]: # disable all logits processors to minimize padding overhead empty_parameters = generate_pb2.NextTokenChooserParameters( temperature=1.0, top_k=0, top_p=1.0, typical_p=1.0, do_sample=False, seed=0, repetition_penalty=1.0, frequency_penalty=0.0, watermark=False, grammar="", grammar_type=0, ) parameters.extend([empty_parameters] * (expected_size - len(parameters))) return parameters class Sampling: def __init__(self, seed: int, device: str = "cpu"): if device in ["hpu", torch.device("hpu")]: import habana_frameworks.torch.hpu.random as htrandom self.generator = htrandom.default_generators[0].manual_seed(seed) else: self.generator = torch.Generator("cpu") self.generator.manual_seed(seed) self.seed = seed def __call__(self, logits): probs = torch.nn.functional.softmax(logits, -1) # Avoid GPU<->CPU sync done by torch multinomial # See: https://github.com/pytorch/pytorch/blob/925a3788ec5c06db62ca732a0e9425a26a00916f/aten/src/ATen/native/Distributions.cpp#L631-L637 q = torch.empty_like(probs).exponential_(1, generator=self.generator) return probs.div_(q).argmax() class Greedy: def __call__(self, logits): return logits.argmax(dim=-1) class HeterogeneousSampling: r""" Mixed greedy and probabilistic sampling. Compute both and pick the right one for each sample. 
""" def __init__(self, do_sample: List[bool], seeds: List[int], device: torch.device): self.seeds = seeds self.greedy_indices = [] self.sampling_mapping = {} for i, (sample, seed) in enumerate(zip(do_sample, seeds)): if sample: self.sampling_mapping[i] = Sampling(seed, device) else: self.greedy_indices.append(i) self.greedy = Greedy() def __call__(self, logits): out = torch.zeros(logits.shape[0], dtype=torch.int64, device=logits.device) if self.greedy_indices: # Computing for all indices is faster than slicing torch.argmax(logits, -1, out=out) for i, sampling in self.sampling_mapping.items(): out[i] = sampling(logits[i]) return out def filter(self, indices): new_greedy_indices = [] new_sampling_mapping = {} for i, idx in enumerate(indices): if idx in self.sampling_mapping: new_sampling_mapping[i] = self.sampling_mapping[idx] else: new_greedy_indices.append(i) self.greedy_indices = new_greedy_indices self.sampling_mapping = new_sampling_mapping return self def batch_top_tokens( top_n_tokens: List[int], top_n_tokens_tensor: torch.Tensor, logprobs: torch.Tensor, accepted_ids: torch.Tensor, ) -> Tuple[List[List[List[int]]], List[List[List[float]]]]: """Find the top n most likely tokens for a batch of generations. When multiple tokens have equal probabilities and they don't all fit, the remaining tokens are also returned. """ max_top_n = max(top_n_tokens) # Early exit when top_n_tokens is not used if max_top_n == 0: return [[[]]] * len(top_n_tokens), [[[]]] * len(top_n_tokens) batch_size = accepted_ids.shape[0] speculate_size = logprobs.shape[0] // batch_size top_n_tokens_tensor = top_n_tokens_tensor.repeat_interleave(speculate_size) # Ensure top_n doesn't exceed vocab size top_n_tokens = [ min(tok, logprobs.size(-1)) for tok in top_n_tokens for _ in range(speculate_size) ] # Parallel kthvalue adapted from https://discuss.pytorch.org/t/how-to-efficiently-get-the-k-th-largest-values-in-parallel/160529/2 # Sorted topk is faster than torch.sort() since we only need a small subset sorted_top_k = torch.topk(logprobs, k=max_top_n, dim=-1, sorted=True).values nth_highest = torch.gather( sorted_top_k, 1, (top_n_tokens_tensor - 1).clip(min=0).unsqueeze(1) ) nth_highest[nth_highest == -float("inf")] = torch.finfo(logprobs.dtype).min # Find the new "fuzzy" top n values top_n_indices = (logprobs >= nth_highest).nonzero() _, top_n_ishes = torch.unique_consecutive(top_n_indices[:, 0], return_counts=True) k = 1 if top_n_ishes.numel() == 0 else top_n_ishes.max() # Take a new topk for these new max n values top_k = torch.topk(logprobs, k=k, dim=1, sorted=True) top_n_ishes = top_n_ishes.tolist() top_indices = top_k.indices.tolist() top_values = top_k.values.tolist() batch_top_token_ids = [] batch_top_token_logprobs = [] accepted_ids_list = accepted_ids.tolist() for i, n_accepted_ids in enumerate(accepted_ids_list): start = speculate_size * i stop = speculate_size * (i + 1) _top_indices = top_indices[start:stop] _top_values = top_values[start:stop] _top_n_ishes = top_n_ishes[start:stop] _top_n_tokens = top_n_tokens[start:stop] _top_indices = _top_indices[:n_accepted_ids] _top_values = _top_values[:n_accepted_ids] _top_n_ishes = _top_n_ishes[:n_accepted_ids] _top_n_tokens = _top_n_tokens[:n_accepted_ids] row_top_token_ids = [] row_top_token_logprobs = [] for idxs, vals, n, req_n in zip( _top_indices, _top_values, _top_n_ishes, _top_n_tokens ): indices = idxs[:n] if req_n > 0 else [] values = vals[:n] if req_n > 0 else [] row_top_token_ids.append(indices) row_top_token_logprobs.append(values) 
batch_top_token_ids.append(row_top_token_ids) batch_top_token_logprobs.append(row_top_token_logprobs) return batch_top_token_ids, batch_top_token_logprobs def make_tokenizer_optional(tokenizer): class _(type(tokenizer)): def __call__( self, text, return_tensors, padding, return_token_type_ids, truncation, max_length, ): assert ( return_tensors == "pt" ), "incorrect input arguments when calling TransparentTokenizer" assert ( padding == "max_length" or padding == "longest" ), "incorrect input arguments when calling TransparentTokenizer" assert ( not return_token_type_ids ), "incorrect input arguments when calling TransparentTokenizer" assert ( truncation ), "incorrect input arguments when calling TransparentTokenizer" def str_token_to_int(i): if i == "?": return tokenizer.pad_token_id else: return int(i) all_tokens = [ [str_token_to_int(i.strip()) for i in inner_text.split(",")] for inner_text in text ] if padding == "longest": max_length = max(len(tokens) for tokens in all_tokens) return { "input_ids": torch.tensor( [ [tokenizer.pad_token_id] * (max_length - len(tokens)) + tokens for tokens in all_tokens ] ), "attention_mask": torch.tensor( [ [0] * (max_length - len(tokens)) + [1] * len(tokens) for tokens in all_tokens ] ), } def decode( self, token_ids, skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = None, **kwargs, ) -> str: # I don't think this method is used anywhere and should be removed when doing refactoring return ",".join(str(i) for i in to_py_obj(token_ids)) # noqa: F821 if os.getenv("SKIP_TOKENIZER_IN_TGI", "false").lower() == "true": tokenizer.__class__ = _ tokenizer.is_transparent = True def is_tokenizer_transparent(tokenizer): return hasattr(tokenizer, "is_transparent") and tokenizer.is_transparent is True
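# --- Hedged illustration (added for exposition; not part of the upstream module) ---
# StopSequenceCriteria only fires when the stop string is a *suffix* of the text
# it receives, which is why StoppingCriteria appends each new chunk to a rolling
# `current_output` buffer before checking. A tiny self-contained sanity check:
if __name__ == "__main__":
    criteria = StopSequenceCriteria("</s>")
    assert not criteria("hello </s> world")  # present, but not at the end
    assert criteria("hello world</s>")  # suffix match -> generation should stop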
text-generation-inference/backends/gaudi/server/text_generation_server/utils/tokens.py/0
{ "file_path": "text-generation-inference/backends/gaudi/server/text_generation_server/utils/tokens.py", "repo_id": "text-generation-inference", "token_count": 13504 }
293
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. mkfile_path := $(abspath $(lastword $(MAKEFILE_LIST))) mkfile_dir := $(dir $(mkfile_path)) root_dir := "${mkfile_dir}/../.." .PHONY: image install_server test_server test_integration VERSION := $(shell gawk 'match($$0, /^version = "(.*)"/, a) {print a[1]}' ${root_dir}/Cargo.toml) image: docker build --rm -f ${root_dir}/Dockerfile.neuron \ --ulimit nofile=100000:100000 \ --build-arg VERSION=$(VERSION) \ -t text-generation-inference:$(VERSION)-neuron ${root_dir} docker tag text-generation-inference:$(VERSION)-neuron text-generation-inference:latest-neuron install_server: make -C ${mkfile_dir}/server install VERSION:=${VERSION} test_server: install_server python -m pip install -r ${mkfile_dir}/tests/requirements.txt python -m pytest -sv ${mkfile_dir}/tests/server
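# Hedged usage note (added): typical invocations from this directory might be
#   make image          # builds and tags text-generation-inference:<version>-neuron
#   make test_server    # installs the server, then runs the pytest suite in ./tests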
text-generation-inference/backends/neuron/Makefile/0
{ "file_path": "text-generation-inference/backends/neuron/Makefile", "repo_id": "text-generation-inference", "token_count": 467 }
294
set(SPDLOG_USE_FMT ON) set(SPDLOG_BUILD_SHARED OFF) set(SPDLOG_FMT_EXTERNAL OFF) # Define the compile-time log level (SPDLOG_ACTIVE_LEVEL) based on the build type if (${CMAKE_BUILD_TYPE} STREQUAL "Debug") add_compile_definitions(SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) else () add_compile_definitions(SPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_DEBUG) endif () fetchcontent_declare( spdlog # DOWNLOAD_EXTRACT_TIMESTAMP URL https://github.com/gabime/spdlog/archive/refs/tags/v1.15.0.tar.gz ) fetchcontent_makeavailable(spdlog)
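# Hedged usage note (added): once fetchcontent_makeavailable(spdlog) has run, a
# consuming target would typically link the vendored static library like so
# (`my_backend` is a placeholder target name):
#   target_link_libraries(my_backend PRIVATE spdlog::spdlog)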
text-generation-inference/backends/trtllm/cmake/spdlog.cmake/0
{ "file_path": "text-generation-inference/backends/trtllm/cmake/spdlog.cmake", "repo_id": "text-generation-inference", "token_count": 245 }
295
[package] name = "text-generation-router-v2" description = "Text Generation Webserver" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [lib] path = "src/lib.rs" [[bin]] name = "text-generation-router-v2" path = "src/main.rs" [dependencies] async-trait = "0.1.74" async-stream = "0.3.5" axum = { version = "0.7", features = ["json"] } axum-tracing-opentelemetry = "0.16" text-generation-router = { path = "../../router" } clap = { version = "4.4.5", features = ["derive", "env"] } grpc-metadata = { path = "../grpc-metadata" } futures = "0.3.28" hf-hub = { workspace = true } jsonschema = { version = "0.28.0" } metrics = { workspace = true } metrics-exporter-prometheus = { workspace = true } nohash-hasher = "0.2.0" opentelemetry = { version = "0.20.0", features = ["rt-tokio"] } opentelemetry-otlp = "0.13.0" rand = "0.8.5" reqwest = { version = "0.11.20", features = [] } serde = "1.0.188" serde_json = "1.0.107" slotmap = "1.0.7" thiserror = "1.0.48" tokenizers = { workspace = true } tokio = { version = "1.32.0", features = [ "rt", "rt-multi-thread", "parking_lot", "signal", "sync", ] } tokio-stream = "0.1.14" tower-http = { version = "0.5.1", features = ["cors"] } tracing = "0.1.37" tracing-opentelemetry = "0.21.0" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } utoipa = { version = "4.2.0", features = ["axum_extras"] } utoipa-swagger-ui = { version = "6.0.0", features = ["axum"] } init-tracing-opentelemetry = { version = "0.14.1", features = [ "opentelemetry-otlp", ] } minijinja = { workspace = true } minijinja-contrib = { workspace = true } futures-util = "0.3.30" regex = "1.10.3" once_cell = "1.19.0" image = "0.25.1" base64 = { workspace = true } prost = "^0.12" tonic = "^0.10" tower = "^0.4" [build-dependencies] tonic-build = "0.10.1" prost-build = "0.12.1" [features] default = ["ngrok"] ngrok = ["text-generation-router/ngrok"] google = ["text-generation-router/google"] kserve = ["text-generation-router/kserve"]
text-generation-inference/backends/v2/Cargo.toml/0
{ "file_path": "text-generation-inference/backends/v2/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 869 }
296
use crate::client::Health; /// Multi shard Client use crate::client::{ClientError, Result}; use crate::client::grpc_client::{DecodeTimings, PrefillTimings}; use crate::client::{ Batch, CachedBatch, Client, Generation, GrammarType, HealthResponse, NextTokenChooserParameters, Request, StoppingCriteriaParameters, }; use crate::client::{Chunk, InfoResponse, Input}; use async_trait::async_trait; use futures::future::join_all; use tonic::transport::Uri; use tracing::instrument; #[derive(Debug, Clone)] /// Text Generation Inference gRPC multi client pub struct ShardedClient { clients: Vec<Client>, } impl ShardedClient { fn new(clients: Vec<Client>) -> Self { Self { clients } } /// Create a new ShardedClient from a master client. The master client will communicate with /// the other shards and return all uris/unix sockets with the `service_discovery` gRPC method. async fn from_master_client(mut master_client: Client) -> Result<Self> { // Get all uris/unix sockets from the master client let uris = master_client.service_discovery().await?; let futures = uris.into_iter().map(Client::connect_uds); let clients: Result<Vec<Client>> = join_all(futures).await.into_iter().collect(); Ok(Self::new(clients?)) } /// Returns a client connected to the given uri #[allow(dead_code)] pub async fn connect(uri: Uri) -> Result<Self> { let master_client = Client::connect(uri).await?; Self::from_master_client(master_client).await } /// Returns a client connected to the given unix socket pub async fn connect_uds(path: String) -> Result<Self> { let master_client = Client::connect_uds(path).await?; Self::from_master_client(master_client).await } /// Get the model info #[instrument(skip(self))] pub async fn info(&mut self) -> Result<InfoResponse> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| client.info()) .collect(); join_all(futures).await.pop().unwrap() } /// GRPC health check #[instrument(skip(self))] pub async fn health(&mut self) -> Result<HealthResponse> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| client.health()) .collect(); join_all(futures).await.pop().unwrap() } /// Clear the past generations cache #[instrument(skip(self))] pub async fn clear_cache(&mut self, batch_id: Option<u64>) -> Result<()> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| client.clear_cache(batch_id)) .collect(); join_all(futures).await.into_iter().collect() } /// Filter a cached batch #[instrument(skip(self))] pub async fn filter_batch( &mut self, batch_id: u64, request_ids: Vec<u64>, ) -> Result<Option<CachedBatch>> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| Box::pin(client.filter_batch(batch_id, request_ids.clone()))) .collect(); // all shards return the same message join_all(futures).await.pop().unwrap() } /// Warmup on a max size batch /// /// Returns the maximum amount of tokens supported by the hardware #[instrument(skip(self))] pub async fn warmup( &mut self, max_input_length: Option<u32>, max_prefill_tokens: u32, max_total_tokens: Option<u32>, max_batch_size: Option<usize>, ) -> Result<(Option<u32>, u32, u32)> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| { Box::pin(client.warmup( max_input_length, max_prefill_tokens, max_total_tokens, max_batch_size, )) }) .collect(); let results = join_all(futures) .await .into_iter() .collect::<Result<Vec<(Option<u32>, u32, u32)>>>()?; // Take the minimum value // Different shards hold different parts of vocab, might yield // different available block size. 
let min = results .iter() .min() .expect("Expect at least 1 warmup result"); Ok(*min) } /// Generate one token for each request in the given batch /// /// Returns Generation for each request in batch /// and the next cached batch #[instrument(skip_all, fields(id = & batch.id, size = & batch.size))] pub async fn prefill( &mut self, batch: Batch, cached_batch: Option<CachedBatch>, ) -> Result<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| Box::pin(client.prefill(batch.clone(), cached_batch.clone()))) .collect(); #[allow(clippy::type_complexity)] let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, PrefillTimings)>> = join_all(futures).await.into_iter().collect(); let mut results = results?; let (mut generations, next_batch, mut timings) = results.pop().ok_or(ClientError::EmptyResults)?; // Merge generations from different model shards for (mut shard_generations, _, shard_timings) in results.into_iter() { generations.append(&mut shard_generations); // Return the timings of the slowest shard if shard_timings.total > timings.total { timings = shard_timings; } } Ok((generations, next_batch, timings)) } /// Generate one token for each request in the given cached batches /// /// Returns Generation for each request in batches /// and the next cached batch #[instrument(skip_all, fields(size = batches.iter().map(| batch | {batch.size}).sum::< u32 > ()))] pub async fn decode( &mut self, batches: Vec<CachedBatch>, ) -> Result<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)> { let futures: Vec<_> = self .clients .iter_mut() .map(|client| Box::pin(client.decode(batches.clone()))) .collect(); #[allow(clippy::type_complexity)] let results: Result<Vec<(Vec<Generation>, Option<CachedBatch>, DecodeTimings)>> = join_all(futures).await.into_iter().collect(); let mut results = results?; let (mut generations, next_batch, mut timings) = results.pop().ok_or(ClientError::EmptyResults)?; // Merge generations from different model shards for (mut shard_generations, _, shard_timings) in results.into_iter() { generations.append(&mut shard_generations); // Return the timings of the slowest shard if shard_timings.total > timings.total { timings = shard_timings; } } Ok((generations, next_batch, timings)) } } #[async_trait] impl Health for ShardedClient { async fn device_health(&self) -> Result<()> { self.clone().health().await?; Ok(()) } async fn model_health(&self) -> Result<()> { // Dummy batch of 1 token and 1 generated token let liveness_request = Request { id: u64::MAX, inputs: "liveness".to_string(), input_chunks: Some(Input { chunks: vec![Chunk::Text("liveness".into()).into()], }), truncate: 1, add_special_tokens: false, prefill_logprobs: false, parameters: Some(NextTokenChooserParameters { temperature: 1.0, top_k: 0, top_p: 1.0, typical_p: 1.0, do_sample: false, seed: 0, repetition_penalty: 1.0, frequency_penalty: 0.0, watermark: false, grammar: String::new(), grammar_type: GrammarType::None as i32, }), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens: 1, stop_sequences: vec![], ignore_eos_token: false, }), top_n_tokens: 0, // Block 0 is reserved for health checks blocks: vec![0], slots: vec![0], cache_len: 0, adapter_id: None, chunk_len: None, }; let batch = Batch { id: u64::MAX, requests: vec![liveness_request], size: 1, max_tokens: 2, max_blocks: 1, }; self.clone().prefill(batch, None).await?; Ok(()) } }
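// --- Hedged illustration (added for exposition; not part of the upstream module) ---
// Every method above follows the same fan-out pattern: clone the request to all
// shards, `join_all` the futures, then collapse `Vec<Result<T>>` into
// `Result<Vec<T>>` so the first shard error fails the whole call. A
// free-standing sketch of that pattern (assumes only the `futures` crate):
//
// async fn fan_out() -> Result<Vec<u32>, String> {
//     let calls = (0..4u32).map(|shard| async move { Ok::<u32, String>(shard) });
//     futures::future::join_all(calls).await.into_iter().collect()
// }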
text-generation-inference/backends/v3/src/client/sharded_client.rs/0
{ "file_path": "text-generation-inference/backends/v3/src/client/sharded_client.rs", "repo_id": "text-generation-inference", "token_count": 4181 }
297
# Legacy warning ⚠️ The inference clients from [huggingface_hub](https://huggingface.co/docs/huggingface_hub/guides/inference) are recommended over `text_generation`. # Text Generation The Hugging Face Text Generation Python library provides a convenient way of interfacing with a `text-generation-inference` instance running on [Hugging Face Inference Endpoints](https://huggingface.co/inference-endpoints) or on the Hugging Face Hub. ## Get Started ### Install ```shell pip install text-generation ``` ### Inference API Usage ```python from text_generation import InferenceAPIClient client = InferenceAPIClient("bigscience/bloomz") text = client.generate("Why is the sky blue?").generated_text print(text) # ' Rayleigh scattering' # Token Streaming text = "" for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` or with the asynchronous client: ```python from text_generation import InferenceAPIAsyncClient client = InferenceAPIAsyncClient("bigscience/bloomz") response = await client.generate("Why is the sky blue?") print(response.generated_text) # ' Rayleigh scattering' # Token Streaming text = "" async for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` Check all currently deployed models on the Hugging Face Inference API with `Text Generation` support: ```python from text_generation.inference_api import deployed_models print(deployed_models()) ``` ### Hugging Face Inference Endpoint Usage ```python from text_generation import Client endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud" client = Client(endpoint_url) text = client.generate("Why is the sky blue?").generated_text print(text) # ' Rayleigh scattering' # Token Streaming text = "" for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` or with the asynchronous client: ```python from text_generation import AsyncClient endpoint_url = "https://YOUR_ENDPOINT.endpoints.huggingface.cloud" client = AsyncClient(endpoint_url) response = await client.generate("Why is the sky blue?") print(response.generated_text) # ' Rayleigh scattering' # Token Streaming text = "" async for response in client.generate_stream("Why is the sky blue?"): if not response.token.special: text += response.token.text print(text) # ' Rayleigh scattering' ``` ### Types ```python # enum for grammar type class GrammarType(Enum): Json = "json" Regex = "regex" # Grammar type and value class Grammar: # Grammar type type: GrammarType # Grammar value value: Union[str, dict] class Parameters: # Activate logits sampling do_sample: bool # Maximum number of generated tokens max_new_tokens: int # The parameter for repetition penalty. 1.0 means no penalty. # See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. repetition_penalty: Optional[float] # The parameter for frequency penalty. 0.0 means no penalty. # Penalize new tokens based on their existing frequency in the text so far, # decreasing the model's likelihood to repeat the same line verbatim. 
frequency_penalty: Optional[float] # Whether to prepend the prompt to the generated text return_full_text: bool # Stop generating tokens if a member of `stop_sequences` is generated stop: List[str] # Random sampling seed seed: Optional[int] # The value used to modulate the logits distribution. temperature: Optional[float] # The number of highest probability vocabulary tokens to keep for top-k-filtering. top_k: Optional[int] # If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or # higher are kept for generation. top_p: Optional[float] # Truncate input tokens to the given size truncate: Optional[int] # Typical Decoding mass # See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information typical_p: Optional[float] # Generate best_of sequences and return the one with the highest token logprobs best_of: Optional[int] # Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) watermark: bool # Get generation details details: bool # Get decoder input token logprobs and ids decoder_input_details: bool # Return the N most likely tokens at each step top_n_tokens: Optional[int] # grammar to use for generation grammar: Optional[Grammar] class Request: # Prompt inputs: str # Generation parameters parameters: Optional[Parameters] # Whether to stream output tokens stream: bool # Decoder input tokens class InputToken: # Token ID from the model tokenizer id: int # Token text text: str # Logprob # Optional since the logprob of the first token cannot be computed logprob: Optional[float] # Generated tokens class Token: # Token ID from the model tokenizer id: int # Token text text: str # Logprob logprob: Optional[float] # Is the token a special token # Can be used to ignore tokens when concatenating special: bool # Generation finish reason class FinishReason(Enum): # number of generated tokens == `max_new_tokens` Length = "length" # the model generated its end of sequence token EndOfSequenceToken = "eos_token" # the model generated a text included in `stop_sequences` StopSequence = "stop_sequence" # Additional sequences when using the `best_of` parameter class BestOfSequence: # Generated text generated_text: str # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] # Decoder input tokens, empty if decoder_input_details is False prefill: List[InputToken] # Generated tokens tokens: List[Token] # Most likely tokens top_tokens: Optional[List[List[Token]]] # `generate` details class Details: # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] # Decoder input tokens, empty if decoder_input_details is False prefill: List[InputToken] # Generated tokens tokens: List[Token] # Most likely tokens top_tokens: Optional[List[List[Token]]] # Additional sequences when using the `best_of` parameter best_of_sequences: Optional[List[BestOfSequence]] # `generate` return value class Response: # Generated text generated_text: str # Generation details details: Details # `generate_stream` details class StreamDetails: # Generation finish reason finish_reason: FinishReason # Number of generated tokens generated_tokens: int # Sampling seed if sampling was activated seed: Optional[int] # `generate_stream` return value class StreamResponse: # Generated token token: Token # Most 
likely tokens top_tokens: Optional[List[Token]] # Complete generated text # Only available when the generation is finished generated_text: Optional[str] # Generation details # Only available when the generation is finished details: Optional[StreamDetails] # Inference API currently deployed model class DeployedModel: model_id: str sha: str ```
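### Guided Generation Example

A minimal, hedged sketch of passing the `Grammar` type above through `generate` (the endpoint URL is a placeholder, and the keyword arguments follow the `Parameters` listing):

```python
from text_generation import Client
from text_generation.types import Grammar, GrammarType

client = Client("https://YOUR_ENDPOINT.endpoints.huggingface.cloud")

# Constrain decoding to emit valid JSON for a (deliberately trivial) schema.
response = client.generate(
    "Describe the color of the sky as JSON:",
    max_new_tokens=64,
    grammar=Grammar(type=GrammarType.Json, value={"type": "object"}),
)
print(response.generated_text)
```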
text-generation-inference/clients/python/README.md/0
{ "file_path": "text-generation-inference/clients/python/README.md", "repo_id": "text-generation-inference", "token_count": 2491 }
298
{ "openapi": "3.0.3", "info": { "title": "Text Generation Inference", "description": "Text Generation Webserver", "contact": { "name": "Olivier Dehaene" }, "license": { "name": "Apache 2.0", "url": "https://www.apache.org/licenses/LICENSE-2.0" }, "version": "3.3.4-dev0" }, "paths": { "/": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens if `stream == false` or a stream of token if `stream == true`", "operationId": "compat_generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CompatGenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "type": "array", "items": { "$ref": "#/components/schemas/GenerateResponse" } } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/chat_tokenize": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Template and tokenize ChatRequest", "operationId": "get_chat_tokenize", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ChatRequest" } } }, "required": true }, "responses": { "200": { "description": "Templated and tokenized ChatRequest", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ChatTokenizeResponse" } } } }, "404": { "description": "Failed to tokenize ChatRequest", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" } } } } } } }, "/generate": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens", "operationId": "generate", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } 
} } } }, "/generate_stream": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate a stream of token using Server-Sent Events", "operationId": "generate_stream", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Text", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/StreamResponse" } } } }, "422": { "description": "Input validation error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "text/event-stream": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/health": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Health check method", "operationId": "health", "responses": { "200": { "description": "Everything is working fine" }, "503": { "description": "Text generation inference is down", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "unhealthy", "error_type": "healthcheck" } } } } } } }, "/info": { "get": { "tags": [ "Text Generation Inference" ], "summary": "Text Generation Inference endpoint info", "operationId": "get_model_info", "responses": { "200": { "description": "Served model info", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/Info" } } } } } } }, "/invocations": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens from Sagemaker request", "operationId": "sagemaker_compatibility", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SagemakerRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Chat Completion", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/SagemakerResponse" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/SagemakerStreamResponse" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error", "error_type": "validation" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation", "error_type": "generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded", "error_type": "overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation", "error_type": "incomplete_generation" } } } } } } }, "/metrics": { "get": { "tags": 
[ "Text Generation Inference" ], "summary": "Prometheus metrics scrape endpoint", "operationId": "metrics", "responses": { "200": { "description": "Prometheus Metrics", "content": { "text/plain": { "schema": { "type": "string" } } } } } } }, "/tokenize": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Tokenize inputs", "operationId": "tokenize", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/GenerateRequest" } } }, "required": true }, "responses": { "200": { "description": "Tokenized ids", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/TokenizeResponse" } } } }, "404": { "description": "No tokenizer found", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "No fast tokenizer available" } } } } } } }, "/v1/chat/completions": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens", "operationId": "chat_completions", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ChatRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Chat Completion", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ChatCompletion" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/ChatCompletionChunk" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/v1/completions": { "post": { "tags": [ "Text Generation Inference" ], "summary": "Generate tokens", "operationId": "completions", "requestBody": { "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CompletionRequest" } } }, "required": true }, "responses": { "200": { "description": "Generated Chat Completion", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/CompletionFinal" } }, "text/event-stream": { "schema": { "$ref": "#/components/schemas/Chunk" } } } }, "422": { "description": "Input validation error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Input validation error" } } } }, "424": { "description": "Generation Error", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Request failed during generation" } } } }, "429": { "description": "Model is overloaded", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Model is overloaded" } } } }, "500": { "description": "Incomplete generation", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" }, "example": { "error": "Incomplete generation" } } } } } } }, "/v1/models": 
{ "get": { "tags": [ "Text Generation Inference" ], "summary": "Get model info", "operationId": "openai_get_model_info", "responses": { "200": { "description": "Served model info", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ModelInfo" } } } }, "404": { "description": "Model not found", "content": { "application/json": { "schema": { "$ref": "#/components/schemas/ErrorResponse" } } } } } } } }, "components": { "schemas": { "BestOfSequence": { "type": "object", "required": [ "generated_text", "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_text": { "type": "string", "example": "test" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 }, "tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } }, "top_tokens": { "type": "array", "items": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } } }, "ChatCompletion": { "type": "object", "required": [ "id", "created", "model", "system_fingerprint", "choices", "usage" ], "properties": { "choices": { "type": "array", "items": { "$ref": "#/components/schemas/ChatCompletionComplete" } }, "created": { "type": "integer", "format": "int64", "example": "1706270835", "minimum": 0 }, "id": { "type": "string" }, "model": { "type": "string", "example": "mistralai/Mistral-7B-Instruct-v0.2" }, "system_fingerprint": { "type": "string" }, "usage": { "$ref": "#/components/schemas/Usage" } } }, "ChatCompletionChoice": { "type": "object", "required": [ "index", "delta" ], "properties": { "delta": { "$ref": "#/components/schemas/ChatCompletionDelta" }, "finish_reason": { "type": "string", "nullable": true }, "index": { "type": "integer", "format": "int32", "minimum": 0 }, "logprobs": { "allOf": [ { "$ref": "#/components/schemas/ChatCompletionLogprobs" } ], "nullable": true } } }, "ChatCompletionChunk": { "type": "object", "required": [ "id", "created", "model", "system_fingerprint", "choices" ], "properties": { "choices": { "type": "array", "items": { "$ref": "#/components/schemas/ChatCompletionChoice" } }, "created": { "type": "integer", "format": "int64", "example": "1706270978", "minimum": 0 }, "id": { "type": "string" }, "model": { "type": "string", "example": "mistralai/Mistral-7B-Instruct-v0.2" }, "system_fingerprint": { "type": "string" }, "usage": { "allOf": [ { "$ref": "#/components/schemas/Usage" } ], "nullable": true } } }, "ChatCompletionComplete": { "type": "object", "required": [ "index", "message", "finish_reason" ], "properties": { "finish_reason": { "type": "string" }, "index": { "type": "integer", "format": "int32", "minimum": 0 }, "logprobs": { "allOf": [ { "$ref": "#/components/schemas/ChatCompletionLogprobs" } ], "nullable": true }, "message": { "$ref": "#/components/schemas/OutputMessage" } } }, "ChatCompletionDelta": { "oneOf": [ { "$ref": "#/components/schemas/TextMessage" }, { "$ref": "#/components/schemas/ToolCallDelta" } ] }, "ChatCompletionLogprob": { "type": "object", "required": [ "token", "logprob", "top_logprobs" ], "properties": { "logprob": { "type": "number", "format": "float" }, "token": { "type": "string" }, "top_logprobs": { "type": "array", "items": { "$ref": "#/components/schemas/ChatCompletionTopLogprob" } } } }, 
"ChatCompletionLogprobs": { "type": "object", "required": [ "content" ], "properties": { "content": { "type": "array", "items": { "$ref": "#/components/schemas/ChatCompletionLogprob" } } } }, "ChatCompletionTopLogprob": { "type": "object", "required": [ "token", "logprob" ], "properties": { "logprob": { "type": "number", "format": "float" }, "token": { "type": "string" } } }, "ChatRequest": { "type": "object", "required": [ "messages" ], "properties": { "frequency_penalty": { "type": "number", "format": "float", "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.", "example": "1.0", "nullable": true }, "logit_bias": { "type": "array", "items": { "type": "number", "format": "float" }, "description": "UNUSED\nModify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens\n(specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically,\nthe bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model,\nbut values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should\nresult in a ban or exclusive selection of the relevant token.", "nullable": true }, "logprobs": { "type": "boolean", "description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each\noutput token returned in the content of message.", "example": "false", "nullable": true }, "max_tokens": { "type": "integer", "format": "int32", "description": "The maximum number of tokens that can be generated in the chat completion.", "default": "1024", "example": "32", "nullable": true, "minimum": 0 }, "messages": { "type": "array", "items": { "$ref": "#/components/schemas/Message" }, "description": "A list of messages comprising the conversation so far.", "example": "[{\"role\": \"user\", \"content\": \"What is Deep Learning?\"}]" }, "model": { "type": "string", "description": "[UNUSED] ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.", "example": "mistralai/Mistral-7B-Instruct-v0.2", "nullable": true }, "n": { "type": "integer", "format": "int32", "description": "UNUSED\nHow many chat completion choices to generate for each input message. Note that you will be charged based on the\nnumber of generated tokens across all of the choices. Keep n as 1 to minimize costs.", "example": "2", "nullable": true, "minimum": 0 }, "presence_penalty": { "type": "number", "format": "float", "description": "Number between -2.0 and 2.0. 
Positive values penalize new tokens based on whether they appear in the text so far,\nincreasing the model's likelihood to talk about new topics", "example": 0.1, "nullable": true }, "response_format": { "allOf": [ { "$ref": "#/components/schemas/GrammarType" } ], "default": "null", "nullable": true }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 }, "stop": { "type": "array", "items": { "type": "string" }, "description": "Up to 4 sequences where the API will stop generating further tokens.", "example": "null", "nullable": true }, "stream": { "type": "boolean" }, "stream_options": { "allOf": [ { "$ref": "#/components/schemas/StreamOptions" } ], "nullable": true }, "temperature": { "type": "number", "format": "float", "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic.\n\nWe generally recommend altering this or `top_p` but not both.", "example": 1.0, "nullable": true }, "tool_choice": { "allOf": [ { "$ref": "#/components/schemas/ToolChoice" } ], "default": "auto", "nullable": true }, "tool_prompt": { "type": "string", "description": "A prompt to be appended before the tools", "example": "Given the functions available, please respond with a JSON for a function call with its proper arguments that best answers the given prompt. Respond in the format {name: function name, parameters: dictionary of argument name and its value}.Do not use variables.", "nullable": true }, "tools": { "type": "array", "items": { "$ref": "#/components/schemas/Tool" }, "description": "A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of\nfunctions the model may generate JSON inputs for.", "example": "null", "nullable": true }, "top_logprobs": { "type": "integer", "format": "int32", "description": "An integer between 0 and 5 specifying the number of most likely tokens to return at each token position, each with\nan associated log probability. logprobs must be set to true if this parameter is used.", "example": "5", "nullable": true, "minimum": 0 }, "top_p": { "type": "number", "format": "float", "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. 
So 0.1 means only the tokens comprising the top 10% probability mass are considered.", "example": 0.95, "nullable": true } } }, "ChatTokenizeResponse": { "type": "object", "required": [ "tokenize_response", "templated_text" ], "properties": { "templated_text": { "type": "string" }, "tokenize_response": { "$ref": "#/components/schemas/TokenizeResponse" } } }, "Chunk": { "type": "object", "required": [ "id", "created", "choices", "model", "system_fingerprint" ], "properties": { "choices": { "type": "array", "items": { "$ref": "#/components/schemas/CompletionComplete" } }, "created": { "type": "integer", "format": "int64", "minimum": 0 }, "id": { "type": "string" }, "model": { "type": "string" }, "system_fingerprint": { "type": "string" } } }, "CompatGenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" }, "stream": { "type": "boolean", "default": "false" } } }, "Completion": { "oneOf": [ { "allOf": [ { "$ref": "#/components/schemas/Chunk" }, { "type": "object", "required": [ "object" ], "properties": { "object": { "type": "string", "enum": [ "text_completion" ] } } } ] }, { "allOf": [ { "$ref": "#/components/schemas/CompletionFinal" }, { "type": "object", "required": [ "object" ], "properties": { "object": { "type": "string", "enum": [ "text_completion" ] } } } ] } ], "discriminator": { "propertyName": "object" } }, "CompletionComplete": { "type": "object", "required": [ "index", "text", "finish_reason" ], "properties": { "finish_reason": { "type": "string" }, "index": { "type": "integer", "format": "int32", "minimum": 0 }, "logprobs": { "type": "array", "items": { "type": "number", "format": "float" }, "nullable": true }, "text": { "type": "string" } } }, "CompletionFinal": { "type": "object", "required": [ "id", "created", "model", "system_fingerprint", "choices", "usage" ], "properties": { "choices": { "type": "array", "items": { "$ref": "#/components/schemas/CompletionComplete" } }, "created": { "type": "integer", "format": "int64", "example": "1706270835", "minimum": 0 }, "id": { "type": "string" }, "model": { "type": "string", "example": "mistralai/Mistral-7B-Instruct-v0.2" }, "system_fingerprint": { "type": "string" }, "usage": { "$ref": "#/components/schemas/Usage" } } }, "CompletionRequest": { "type": "object", "required": [ "prompt" ], "properties": { "frequency_penalty": { "type": "number", "format": "float", "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.", "example": "1.0", "nullable": true }, "max_tokens": { "type": "integer", "format": "int32", "description": "The maximum number of tokens that can be generated in the chat completion.", "default": "1024", "example": "32", "nullable": true, "minimum": 0 }, "model": { "type": "string", "description": "UNUSED\nID of the model to use. 
See the model endpoint compatibility table for details on which models work with the Chat API.", "example": "mistralai/Mistral-7B-Instruct-v0.2", "nullable": true }, "prompt": { "$ref": "#/components/schemas/Prompt" }, "repetition_penalty": { "type": "number", "format": "float", "nullable": true }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 }, "stop": { "type": "array", "items": { "type": "string" }, "description": "Up to 4 sequences where the API will stop generating further tokens.", "example": "null", "nullable": true }, "stream": { "type": "boolean" }, "suffix": { "type": "string", "description": "The text to append to the prompt. This is useful for completing sentences or generating a paragraph of text.\nplease see the completion_template field in the model's tokenizer_config.json file for completion template.", "nullable": true }, "temperature": { "type": "number", "format": "float", "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while\nlower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.", "example": 1.0, "nullable": true }, "top_p": { "type": "number", "format": "float", "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the\ntokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.", "example": 0.95, "nullable": true } } }, "DeltaToolCall": { "type": "object", "required": [ "index", "id", "type", "function" ], "properties": { "function": { "$ref": "#/components/schemas/Function" }, "id": { "type": "string" }, "index": { "type": "integer", "format": "int32", "minimum": 0 }, "type": { "type": "string" } } }, "Details": { "type": "object", "required": [ "finish_reason", "generated_tokens", "prefill", "tokens" ], "properties": { "best_of_sequences": { "type": "array", "items": { "$ref": "#/components/schemas/BestOfSequence" }, "nullable": true }, "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "prefill": { "type": "array", "items": { "$ref": "#/components/schemas/PrefillToken" } }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 }, "tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } }, "top_tokens": { "type": "array", "items": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } } }, "ErrorResponse": { "type": "object", "required": [ "error", "error_type" ], "properties": { "error": { "type": "string" }, "error_type": { "type": "string" } } }, "FinishReason": { "type": "string", "enum": [ "length", "eos_token", "stop_sequence" ], "example": "Length" }, "Function": { "type": "object", "required": [ "arguments" ], "properties": { "arguments": { "type": "string" }, "name": { "type": "string", "nullable": true } } }, "FunctionDefinition": { "type": "object", "required": [ "name", "arguments" ], "properties": { "arguments": {}, "description": { "type": "string", "nullable": true }, "name": { "type": "string" } } }, "FunctionName": { "type": "object", "required": [ "name" ], "properties": { "name": { "type": "string" } } }, "GenerateParameters": { "type": "object", "properties": { "adapter_id": { "type": "string", "description": "Lora adapter id", 
"default": "null", "example": "null", "nullable": true }, "best_of": { "type": "integer", "description": "Generate best_of sequences and return the one if the highest token logprobs.", "default": "null", "example": 1, "nullable": true, "minimum": 0, "exclusiveMinimum": 0 }, "decoder_input_details": { "type": "boolean", "description": "Whether to return decoder input token logprobs and ids.", "default": "false" }, "details": { "type": "boolean", "description": "Whether to return generation details.", "default": "true" }, "do_sample": { "type": "boolean", "description": "Activate logits sampling.", "default": "false", "example": true }, "frequency_penalty": { "type": "number", "format": "float", "description": "The parameter for frequency penalty. 1.0 means no penalty\nPenalize new tokens based on their existing frequency in the text so far,\ndecreasing the model's likelihood to repeat the same line verbatim.", "default": "null", "example": 0.1, "nullable": true, "exclusiveMinimum": -2 }, "grammar": { "allOf": [ { "$ref": "#/components/schemas/GrammarType" } ], "default": "null", "nullable": true }, "max_new_tokens": { "type": "integer", "format": "int32", "description": "Maximum number of tokens to generate.", "default": "1024", "example": "20", "nullable": true, "minimum": 0 }, "repetition_penalty": { "type": "number", "format": "float", "description": "The parameter for repetition penalty. 1.0 means no penalty.\nSee [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.", "default": "null", "example": 1.03, "nullable": true, "exclusiveMinimum": 0 }, "return_full_text": { "type": "boolean", "description": "Whether to prepend the prompt to the generated text", "default": "null", "example": false, "nullable": true }, "seed": { "type": "integer", "format": "int64", "description": "Random sampling seed.", "default": "null", "example": "null", "nullable": true, "minimum": 0, "exclusiveMinimum": 0 }, "stop": { "type": "array", "items": { "type": "string" }, "description": "Stop generating tokens if a member of `stop` is generated.", "example": [ "photographer" ], "maxItems": 4 }, "temperature": { "type": "number", "format": "float", "description": "The value used to module the logits distribution.", "default": "null", "example": 0.5, "nullable": true, "exclusiveMinimum": 0 }, "top_k": { "type": "integer", "format": "int32", "description": "The number of highest probability vocabulary tokens to keep for top-k-filtering.", "default": "null", "example": 10, "nullable": true, "exclusiveMinimum": 0 }, "top_n_tokens": { "type": "integer", "format": "int32", "description": "The number of highest probability vocabulary tokens to keep for top-n-filtering.", "default": "null", "example": 5, "nullable": true, "minimum": 0, "exclusiveMinimum": 0 }, "top_p": { "type": "number", "format": "float", "description": "Top-p value for nucleus sampling.", "default": "null", "example": 0.95, "nullable": true, "maximum": 1, "exclusiveMinimum": 0 }, "truncate": { "type": "integer", "description": "Truncate inputs tokens to the given size.", "default": "null", "example": "null", "nullable": true, "minimum": 0 }, "typical_p": { "type": "number", "format": "float", "description": "Typical Decoding mass\nSee [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.", "default": "null", "example": 0.95, "nullable": true, "maximum": 1, "exclusiveMinimum": 0 }, "watermark": { "type": "boolean", "description": "Watermarking with [A Watermark for Large Language 
Models](https://arxiv.org/abs/2301.10226).", "default": "false", "example": true } } }, "GenerateRequest": { "type": "object", "required": [ "inputs" ], "properties": { "inputs": { "type": "string", "example": "My name is Olivier and I" }, "parameters": { "$ref": "#/components/schemas/GenerateParameters" } } }, "GenerateResponse": { "type": "object", "required": [ "generated_text" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/Details" } ], "nullable": true }, "generated_text": { "type": "string", "example": "test" } } }, "GrammarType": { "oneOf": [ { "type": "object", "required": [ "type", "value" ], "properties": { "type": { "type": "string", "enum": [ "json" ] }, "value": { "description": "A string that represents a [JSON Schema](https://json-schema.org/).\n\nJSON Schema is a declarative language that allows to annotate JSON documents\nwith types and descriptions." } } }, { "type": "object", "required": [ "type", "value" ], "properties": { "type": { "type": "string", "enum": [ "regex" ] }, "value": { "type": "string" } } }, { "type": "object", "required": [ "type", "value" ], "properties": { "type": { "type": "string", "enum": [ "json_schema" ] }, "value": { "$ref": "#/components/schemas/JsonSchemaConfig" } } } ], "discriminator": { "propertyName": "type" } }, "Info": { "type": "object", "required": [ "model_id", "max_concurrent_requests", "max_best_of", "max_stop_sequences", "max_input_tokens", "max_total_tokens", "validation_workers", "max_client_batch_size", "router", "version" ], "properties": { "docker_label": { "type": "string", "example": "null", "nullable": true }, "max_best_of": { "type": "integer", "example": "2", "minimum": 0 }, "max_client_batch_size": { "type": "integer", "example": "32", "minimum": 0 }, "max_concurrent_requests": { "type": "integer", "description": "Router Parameters", "example": "128", "minimum": 0 }, "max_input_tokens": { "type": "integer", "example": "1024", "minimum": 0 }, "max_stop_sequences": { "type": "integer", "example": "4", "minimum": 0 }, "max_total_tokens": { "type": "integer", "example": "2048", "minimum": 0 }, "model_id": { "type": "string", "description": "Model info", "example": "bigscience/blomm-560m" }, "model_pipeline_tag": { "type": "string", "example": "text-generation", "nullable": true }, "model_sha": { "type": "string", "example": "e985a63cdc139290c5f700ff1929f0b5942cced2", "nullable": true }, "router": { "type": "string", "description": "Router Info", "example": "text-generation-router" }, "sha": { "type": "string", "example": "null", "nullable": true }, "validation_workers": { "type": "integer", "example": "2", "minimum": 0 }, "version": { "type": "string", "example": "0.5.0" } } }, "JsonSchemaConfig": { "type": "object", "required": [ "schema" ], "properties": { "name": { "type": "string", "description": "Optional name identifier for the schema", "nullable": true }, "schema": { "description": "The actual JSON schema definition" } } }, "Message": { "allOf": [ { "$ref": "#/components/schemas/MessageBody" }, { "type": "object", "required": [ "role" ], "properties": { "name": { "type": "string", "example": "\"David\"", "nullable": true }, "role": { "type": "string", "example": "user" } } } ] }, "MessageBody": { "oneOf": [ { "type": "object", "required": [ "content" ], "properties": { "content": { "$ref": "#/components/schemas/MessageContent" } } }, { "type": "object", "required": [ "tool_calls" ], "properties": { "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/ToolCall" } } } } ] 
}, "MessageChunk": { "oneOf": [ { "type": "object", "required": [ "text", "type" ], "properties": { "text": { "type": "string" }, "type": { "type": "string", "enum": [ "text" ] } } }, { "type": "object", "required": [ "image_url", "type" ], "properties": { "image_url": { "$ref": "#/components/schemas/Url" }, "type": { "type": "string", "enum": [ "image_url" ] } } } ], "discriminator": { "propertyName": "type" } }, "MessageContent": { "oneOf": [ { "type": "string" }, { "type": "array", "items": { "$ref": "#/components/schemas/MessageChunk" } } ] }, "ModelInfo": { "type": "object", "required": [ "id", "object", "created", "owned_by" ], "properties": { "created": { "type": "integer", "format": "int64", "example": 1686935002, "minimum": 0 }, "id": { "type": "string", "example": "gpt2" }, "object": { "type": "string", "example": "model" }, "owned_by": { "type": "string", "example": "openai" } } }, "OutputMessage": { "oneOf": [ { "$ref": "#/components/schemas/TextMessage" }, { "$ref": "#/components/schemas/ToolCallMessage" } ] }, "PrefillToken": { "type": "object", "required": [ "id", "text", "logprob" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "text": { "type": "string", "example": "test" } } }, "Prompt": { "type": "array", "items": { "type": "string" } }, "SagemakerRequest": { "oneOf": [ { "$ref": "#/components/schemas/CompatGenerateRequest" }, { "$ref": "#/components/schemas/ChatRequest" }, { "$ref": "#/components/schemas/CompletionRequest" } ] }, "SagemakerResponse": { "oneOf": [ { "$ref": "#/components/schemas/GenerateResponse" }, { "$ref": "#/components/schemas/ChatCompletion" }, { "$ref": "#/components/schemas/CompletionFinal" } ] }, "SagemakerStreamResponse": { "oneOf": [ { "$ref": "#/components/schemas/StreamResponse" }, { "$ref": "#/components/schemas/ChatCompletionChunk" }, { "$ref": "#/components/schemas/Chunk" } ] }, "SimpleToken": { "type": "object", "required": [ "id", "text", "start", "stop" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0 }, "start": { "type": "integer", "example": 0, "minimum": 0 }, "stop": { "type": "integer", "example": 2, "minimum": 0 }, "text": { "type": "string", "example": "test" } } }, "StreamDetails": { "type": "object", "required": [ "finish_reason", "generated_tokens", "input_length" ], "properties": { "finish_reason": { "$ref": "#/components/schemas/FinishReason" }, "generated_tokens": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "input_length": { "type": "integer", "format": "int32", "example": 1, "minimum": 0 }, "seed": { "type": "integer", "format": "int64", "example": 42, "nullable": true, "minimum": 0 } } }, "StreamOptions": { "type": "object", "properties": { "include_usage": { "type": "boolean", "description": "If set, an additional chunk will be streamed before the data: [DONE] message. The usage field on this chunk shows the token usage statistics for the entire request, and the choices field will always be an empty array. 
All other chunks will also include a usage field, but with a null value.", "example": "true" } } }, "StreamResponse": { "type": "object", "required": [ "index", "token" ], "properties": { "details": { "allOf": [ { "$ref": "#/components/schemas/StreamDetails" } ], "default": "null", "nullable": true }, "generated_text": { "type": "string", "default": "null", "example": "test", "nullable": true }, "index": { "type": "integer", "format": "int32", "minimum": 0 }, "token": { "$ref": "#/components/schemas/Token" }, "top_tokens": { "type": "array", "items": { "$ref": "#/components/schemas/Token" } } } }, "TextMessage": { "type": "object", "required": [ "role", "content" ], "properties": { "content": { "type": "string", "example": "My name is David and I" }, "role": { "type": "string", "example": "user" }, "tool_call_id": { "type": "string", "nullable": true } } }, "Token": { "type": "object", "required": [ "id", "text", "logprob", "special" ], "properties": { "id": { "type": "integer", "format": "int32", "example": 0, "minimum": 0 }, "logprob": { "type": "number", "format": "float", "example": -0.34, "nullable": true }, "special": { "type": "boolean", "example": "false" }, "text": { "type": "string", "example": "test" } } }, "TokenizeResponse": { "type": "array", "items": { "$ref": "#/components/schemas/SimpleToken" } }, "Tool": { "type": "object", "required": [ "type", "function" ], "properties": { "function": { "$ref": "#/components/schemas/FunctionDefinition" }, "type": { "type": "string", "example": "function" } } }, "ToolCall": { "type": "object", "required": [ "id", "type", "function" ], "properties": { "function": { "$ref": "#/components/schemas/FunctionDefinition" }, "id": { "type": "string" }, "type": { "type": "string" } } }, "ToolCallDelta": { "type": "object", "required": [ "role", "tool_calls" ], "properties": { "role": { "type": "string", "example": "assistant" }, "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/DeltaToolCall" } } } }, "ToolCallMessage": { "type": "object", "required": [ "role", "tool_calls" ], "properties": { "role": { "type": "string", "example": "assistant" }, "tool_calls": { "type": "array", "items": { "$ref": "#/components/schemas/ToolCall" } } } }, "ToolChoice": { "oneOf": [ { "type": "string", "description": "Means the model can pick between generating a message or calling one or more tools.", "enum": [ "auto" ] }, { "type": "string", "description": "Means the model will not call any tool and instead generates a message.", "enum": [ "none" ] }, { "type": "string", "description": "Means the model must call one or more tools.", "enum": [ "required" ] }, { "type": "object", "required": [ "function" ], "properties": { "function": { "$ref": "#/components/schemas/FunctionName" } } } ], "description": "<https://platform.openai.com/docs/guides/function-calling/configuring-function-calling-behavior-using-the-tool_choice-parameter>" }, "Url": { "type": "object", "required": [ "url" ], "properties": { "url": { "type": "string" } } }, "Usage": { "type": "object", "required": [ "prompt_tokens", "completion_tokens", "total_tokens" ], "properties": { "completion_tokens": { "type": "integer", "format": "int32", "minimum": 0 }, "prompt_tokens": { "type": "integer", "format": "int32", "minimum": 0 }, "total_tokens": { "type": "integer", "format": "int32", "minimum": 0 } } } } }, "tags": [ { "name": "Text Generation Inference", "description": "Hugging Face Text Generation Inference API" } ] }
text-generation-inference/docs/openapi.json/0
{ "file_path": "text-generation-inference/docs/openapi.json", "repo_id": "text-generation-inference", "token_count": 39763 }
299
# Vision Language Model Inference in TGI

Vision Language Models (VLMs) are models that consume both image and text inputs to generate text. VLMs are trained on a combination of image and text data and can handle a wide range of tasks, such as image captioning, visual question answering, and visual dialog.

> What distinguishes VLMs from other text and image models is their ability to handle long context and generate text that is coherent and relevant to the image even after multiple turns or, in some cases, multiple images.

Below are a couple of common use cases for vision language models:

- **Image Captioning**: Given an image, generate a caption that describes the image.
- **Visual Question Answering (VQA)**: Given an image and a question about the image, generate an answer to the question.
- **Multimodal Dialog**: Generate responses to multiple turns of images and conversations.
- **Image Information Retrieval**: Given an image, retrieve information from the image.

## How to Use a Vision Language Model?

### Hugging Face Hub Python Library

To infer with vision language models through Python, you can use the [`huggingface_hub`](https://pypi.org/project/huggingface-hub/) library. The `InferenceClient` class provides a simple way to interact with the [Inference API](https://huggingface.co/docs/api-inference/index). Images can be passed as URLs or base64-encoded strings. The `InferenceClient` will automatically detect the image format.

```python
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="http://127.0.0.1:3000")

image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
prompt = f"![]({image})What is this a picture of?\n\n"

for token in client.text_generation(prompt, max_new_tokens=16, stream=True):
    print(token)

# This is a picture of an anthropomorphic rabbit in a space suit.
```

```python
from huggingface_hub import InferenceClient
import base64

client = InferenceClient(base_url="http://127.0.0.1:3000")

# read image from local file and encode it as a base64 data URL
image_path = "rabbit.png"
with open(image_path, "rb") as f:
    image = base64.b64encode(f.read()).decode("utf-8")

image = f"data:image/png;base64,{image}"
prompt = f"![]({image})What is this a picture of?\n\n"

for token in client.text_generation(prompt, max_new_tokens=10, stream=True):
    print(token)

# This is a picture of an anthropomorphic rabbit in a space suit.
```

or via the `chat_completion` endpoint:

```python
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="http://127.0.0.1:3000")

chat = client.chat_completion(
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
                    },
                },
            ],
        },
    ],
    seed=42,
    max_tokens=100,
)

print(chat)
# ChatCompletionOutput(choices=[ChatCompletionOutputComplete(finish_reason='length', index=0, message=ChatCompletionOutputMessage(role='assistant', content=" The image you've provided features an anthropomorphic rabbit in spacesuit attire. This rabbit is depicted with human-like posture and movement, standing on a rocky terrain with a vast, reddish-brown landscape in the background. The spacesuit is detailed with mission patches, circuitry, and a helmet that covers the rabbit's face and ear, with an illuminated red light on the chest area.\n\nThe artwork style is that of a", name=None, tool_calls=None), logprobs=None)], created=1714589614, id='', model='llava-hf/llava-v1.6-mistral-7b-hf', object='text_completion', system_fingerprint='2.0.2-native', usage=ChatCompletionOutputUsage(completion_tokens=100, prompt_tokens=2943, total_tokens=3043))
```

or with OpenAI's [client library](https://github.com/openai/openai-python):

```python
from openai import OpenAI

# init the client but point it to TGI
client = OpenAI(base_url="http://localhost:3000/v1", api_key="-")

chat_completion = client.chat.completions.create(
    model="tgi",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this image?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"
                    },
                },
            ],
        },
    ],
    stream=False,
)

print(chat_completion)
# ChatCompletion(id='', choices=[Choice(finish_reason='eos_token', index=0, logprobs=None, message=ChatCompletionMessage(content=' The image depicts an anthropomorphic rabbit dressed in a space suit with gear that resembles NASA attire. The setting appears to be a solar eclipse with dramatic mountain peaks and a partial celestial body in the sky. The artwork is detailed and vivid, with a warm color palette and a sense of an adventurous bunny exploring or preparing for a journey beyond Earth. ', role='assistant', function_call=None, tool_calls=None))], created=1714589732, model='llava-hf/llava-v1.6-mistral-7b-hf', object='text_completion', system_fingerprint='2.0.2-native', usage=CompletionUsage(completion_tokens=84, prompt_tokens=2943, total_tokens=3027))
```

### Inference Through Sending `cURL` Requests

To use the `generate_stream` endpoint with curl, you can add the `-N` flag. This flag disables curl's default buffering and shows data as it arrives from the server.

```bash
curl -N 127.0.0.1:3000/generate_stream \
    -X POST \
    -d '{"inputs":"![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n","parameters":{"max_new_tokens":16, "seed": 42}}' \
    -H 'Content-Type: application/json'

# ...
# data:{"index":16,"token":{"id":28723,"text":".","logprob":-0.6196289,"special":false},"generated_text":"This is a picture of an anthropomorphic rabbit in a space suit.","details":null}
```

### Inference Through JavaScript

First, we need to install the `@huggingface/inference` library.

```bash
npm install @huggingface/inference
```

Whether you use Inference Providers (our serverless API) or Inference Endpoints, you can call `InferenceClient`. We can create an `InferenceClient`, providing our endpoint URL and [Hugging Face access token](https://huggingface.co/settings/tokens).

```js
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient('hf_YOUR_TOKEN', { endpointUrl: 'https://YOUR_ENDPOINT.endpoints.huggingface.cloud' });

const prompt = "![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n";

const stream = client.textGenerationStream({
    inputs: prompt,
    parameters: { max_new_tokens: 16, seed: 42 },
});

for await (const r of stream) {
    // yield the generated token
    process.stdout.write(r.token.text);
}

// This is a picture of an anthropomorphic rabbit in a space suit.
```

## Combining Vision Language Models with Other Features

VLMs in TGI can be used in tandem with other features for more complex tasks. For example, you can use VLMs with [Guided Generation](/docs/conceptual/guided-generation) to generate specific JSON data from an image.

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png" width="400" />
</div>

We can extract information from the rabbit image above and generate a JSON object with the location, activity, number of animals seen, and the animals seen. That would look like this:

```json
{
    "activity": "Standing",
    "animals": ["Rabbit"],
    "animals_seen": 1,
    "location": "Rocky surface with mountains in the background and a red light on the rabbit's chest"
}
```

All we need to do is provide the VLM with a JSON schema, and it will generate a matching JSON object for us.

```bash
curl localhost:3000/generate \
    -X POST \
    -H 'Content-Type: application/json' \
    -d '{
    "inputs":"![](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png)What is this a picture of?\n\n",
    "parameters": {
        "max_new_tokens": 100,
        "seed": 42,
        "grammar": {
            "type": "json",
            "value": {
                "properties": {
                    "location": { "type": "string" },
                    "activity": { "type": "string" },
                    "animals_seen": { "type": "integer", "minimum": 1, "maximum": 5 },
                    "animals": { "type": "array", "items": { "type": "string" } }
                },
                "required": ["location", "activity", "animals_seen", "animals"]
            }
        }
    }
}'

# {
#     "generated_text": "{ \"activity\": \"Standing\", \"animals\": [ \"Rabbit\" ], \"animals_seen\": 1, \"location\": \"Rocky surface with mountains in the background and a red light on the rabbit's chest\" }"
# }
```

Want to learn more about how Vision Language Models work? Check out the [awesome blog post on the topic](https://huggingface.co/blog/vlms).
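The same guided-generation request can also be sent from Python. The following is a minimal sketch that assumes a recent `huggingface_hub` release in which `InferenceClient.text_generation` exposes a `grammar` parameter mirroring TGI's `GenerateParameters.grammar` field; treat it as illustrative rather than canonical.

```python
from huggingface_hub import InferenceClient

client = InferenceClient(base_url="http://127.0.0.1:3000")

# JSON schema identical to the one in the curl example above
schema = {
    "properties": {
        "location": {"type": "string"},
        "activity": {"type": "string"},
        "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5},
        "animals": {"type": "array", "items": {"type": "string"}},
    },
    "required": ["location", "activity", "animals_seen", "animals"],
}

image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit.png"

# Constrain decoding to the schema via TGI's grammar parameter
generated = client.text_generation(
    f"![]({image})What is this a picture of?\n\n",
    max_new_tokens=100,
    seed=42,
    grammar={"type": "json", "value": schema},
)
print(generated)  # a JSON string constrained to the schema above
```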
text-generation-inference/docs/source/basic_tutorials/visual_language_models.md/0
{ "file_path": "text-generation-inference/docs/source/basic_tutorials/visual_language_models.md", "repo_id": "text-generation-inference", "token_count": 3672 }
300
# Using TGI with Inferentia

You can run TGI on AWS Trainium and Inferentia platforms via the [TGI neuron backend](https://huggingface.co/docs/text-generation-inference/backends/neuron).
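As a rough sketch, launching the neuron-enabled container follows the standard TGI Docker pattern. The image tag, device flag, and model id below are illustrative assumptions; consult the backend documentation linked above for the exact, up-to-date invocation.

```bash
# Hypothetical sketch: tag, flags, and model id may differ across releases.
docker run -p 8080:80 \
    -v $PWD/data:/data \
    --device=/dev/neuron0 \
    -e HF_TOKEN=$HF_TOKEN \
    ghcr.io/huggingface/text-generation-inference:latest-neuron \
    --model-id <model-id>
```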
text-generation-inference/docs/source/installation_inferentia.md/0
{ "file_path": "text-generation-inference/docs/source/installation_inferentia.md", "repo_id": "text-generation-inference", "token_count": 57 }
301
import asyncio
import contextlib
import logging
import os
import random
import shutil
import sys
import tempfile
import time
from typing import List

import docker
import huggingface_hub
import pytest
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
from docker.errors import NotFound
from huggingface_hub import AsyncInferenceClient, TextGenerationOutput

OPTIMUM_CACHE_REPO_ID = "optimum-internal-testing/neuron-testing-cache"
HF_TOKEN = huggingface_hub.get_token()


def get_tgi_docker_image():
    docker_image = os.getenv("DOCKER_IMAGE", None)
    if docker_image is None:
        client = docker.from_env()
        images = client.images.list(filters={"reference": "text-generation-inference"})
        if not images:
            raise ValueError(
                "No text-generation-inference image found on this host to run tests."
            )
        docker_image = images[0].tags[0]
    return docker_image


logging.basicConfig(
    level=logging.INFO,
    format="[%(asctime)s] %(levelname)s [%(filename)s.%(funcName)s:%(lineno)d] %(message)s",
    stream=sys.stdout,
)
logger = logging.getLogger(__file__)


class TestClient(AsyncInferenceClient):
    def __init__(self, service_name: str, base_url: str):
        super().__init__(model=base_url)
        self.service_name = service_name


class LauncherHandle:
    def __init__(self, service_name: str, port: int):
        self.client = TestClient(service_name, f"http://localhost:{port}")

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
        assert timeout > 0
        for i in range(timeout):
            if not self._inner_health():
                raise RuntimeError(f"Service crashed after {i} seconds.")
            try:
                await self.client.text_generation("test", max_new_tokens=1)
                logger.info(f"Service started after {i} seconds")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                time.sleep(1)
            except Exception as e:
                raise RuntimeError(f"Basic generation failed with: {e}") from e
        raise RuntimeError(f"Service failed to start after {i} seconds.")


class ContainerLauncherHandle(LauncherHandle):
    def __init__(self, service_name, docker_client, container_name, port: int):
        super(ContainerLauncherHandle, self).__init__(service_name, port)
        self.docker_client = docker_client
        self.container_name = container_name
        self._log_since = time.time()

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        container_output = container.logs(since=self._log_since).decode("utf-8")
        self._log_since = time.time()
        if container_output != "":
            print(container_output, end="")
        return container.status in ["running", "created"]


@pytest.fixture(scope="module")
def event_loop():
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def neuron_launcher(event_loop):
    """Utility fixture to expose a TGI service.

    The fixture uses a single event loop for each module, but it can create
    multiple docker services with different parameters using the parametrized
    inner context.

    Args:
        service_name (`str`):
            Used to identify test configurations and adjust test expectations.
        model_name_or_path (`str`):
            The model to use (can be a hub model or a path).
        trust_remote_code (`bool`):
            Must be set to True for gated models.

    Returns:
        A `ContainerLauncherHandle` containing both a TGI server and client.
    """

    @contextlib.contextmanager
    def docker_launcher(
        service_name: str,
        model_name_or_path: str,
        trust_remote_code: bool = False,
    ):
        port = random.randint(8000, 10_000)

        client = docker.from_env()

        container_name = f"tgi-tests-{service_name}-{port}"

        # Stop any stale container left over from a previous run
        try:
            container = client.containers.get(container_name)
            container.stop()
            container.wait()
        except NotFound:
            pass

        env = {
            "LOG_LEVEL": "info,text_generation_router=debug",
            "CUSTOM_CACHE_REPO": OPTIMUM_CACHE_REPO_ID,
        }

        if HF_TOKEN is not None:
            env["HUGGING_FACE_HUB_TOKEN"] = HF_TOKEN
            env["HF_TOKEN"] = HF_TOKEN

        # Forward selected neuron-related variables from the host environment
        for var in [
            "MAX_BATCH_SIZE",
            "MAX_TOTAL_TOKENS",
            "HF_AUTO_CAST_TYPE",
            "HF_NUM_CORES",
        ]:
            if var in os.environ:
                env[var] = os.environ[var]

        base_image = get_tgi_docker_image()
        if os.path.isdir(model_name_or_path):
            # Create a sub-image containing the model to work around docker-in-docker
            # issues that prevent sharing a volume with the container running the tests
            test_image = f"{container_name}-img"
            logger.info(
                "Building image on the fly, derived from %s, tagged with %s",
                base_image,
                test_image,
            )
            with tempfile.TemporaryDirectory() as context_dir:
                # Copy model directory to build context
                model_path = os.path.join(context_dir, "model")
                shutil.copytree(model_name_or_path, model_path)
                # Create Dockerfile
                container_model_id = f"/data/{model_name_or_path}"
                docker_content = f"""
                FROM {base_image}
                COPY model {container_model_id}
                """
                with open(os.path.join(context_dir, "Dockerfile"), "wb") as f:
                    f.write(docker_content.encode("utf-8"))
                    f.flush()
                image, logs = client.images.build(
                    path=context_dir, dockerfile=f.name, tag=test_image
                )
            logger.info("Successfully built image %s", image.id)
            logger.debug("Build logs %s", logs)
        else:
            test_image = base_image
            image = None
            container_model_id = model_name_or_path

        args = ["--model-id", container_model_id, "--env"]

        if trust_remote_code:
            args.append("--trust-remote-code")

        container = client.containers.run(
            test_image,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            devices=["/dev/neuron0"],
            ports={"80/tcp": port},
            shm_size="1G",
        )

        logger.info(f"Starting {container_name} container")
        yield ContainerLauncherHandle(service_name, client, container.name, port)

        try:
            container.stop(timeout=60)
            container.wait(timeout=60)
        except Exception as e:
            logger.exception(f"Ignoring exception while stopping container: {e}.")
        finally:
            logger.info("Removing container %s", container_name)
            try:
                container.remove(force=True)
            except Exception as e:
                logger.error(
                    "Error while removing container %s, skipping", container_name
                )
                logger.exception(e)

        # Cleanup the build image
        if image:
            logger.info("Cleaning image %s", image.id)
            try:
                image.remove(force=True)
            except NotFound:
                pass
            except Exception as e:
                logger.error("Error while removing image %s, skipping", image.id)
                logger.exception(e)

    return docker_launcher


@pytest.fixture(scope="module")
def neuron_generate_load():
    """A utility fixture to launch multiple asynchronous TGI requests in parallel.

    Args:
        client (`AsyncClient`):
            An async client.
        prompt (`str`):
            The prompt to use (identical for all requests).
        max_new_tokens (`int`):
            The number of tokens to generate for each request.
        n (`int`):
            The number of requests.

    Returns:
        A list of `huggingface_hub.TextGenerationOutput`.
    """

    async def generate_load_inner(
        client: AsyncInferenceClient, prompt: str, max_new_tokens: int, n: int
    ) -> List[TextGenerationOutput]:
        futures = [
            client.text_generation(
                prompt,
                max_new_tokens=max_new_tokens,
                details=True,
                decoder_input_details=True,
            )
            for _ in range(n)
        ]
        return await asyncio.gather(*futures)

    return generate_load_inner
text-generation-inference/integration-tests/fixtures/neuron/service.py/0
{ "file_path": "text-generation-inference/integration-tests/fixtures/neuron/service.py", "repo_id": "text-generation-inference", "token_count": 4078 }
302
{ "choices": [ { "finish_reason": "length", "index": 1, "logprobs": null, "text": " This is a question that has puzzled many people for" }, { "finish_reason": "length", "index": 0, "logprobs": null, "text": " A Beginner’s Guide\nDeep learning is a subset" }, { "finish_reason": "length", "index": 3, "logprobs": null, "text": "usculas_minusculas(s):\n \"\"\"\n" }, { "finish_reason": "length", "index": 2, "logprobs": null, "text": " Paris\nWhat is the capital of France?\nThe" } ], "created": 1741264813, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "text_completion", "system_fingerprint": "3.1.2-dev0-native", "usage": { "completion_tokens": 40, "prompt_tokens": 22, "total_tokens": 62 } }
text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_completion_prompts/test_flash_llama_completion_many_prompts.json", "repo_id": "text-generation-inference", "token_count": 432 }
303
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 7539, "logprob": -0.609375, "special": false, "text": " forms" }, { "id": 708, "logprob": 0.0, "special": false, "text": " are" }, { "id": 671, "logprob": -1.5546875, "special": false, "text": " an" }, { "id": 8727, "logprob": 0.0, "special": false, "text": " essential" }, { "id": 1702, "logprob": 0.0, "special": false, "text": " part" }, { "id": 576, "logprob": 0.0, "special": false, "text": " of" }, { "id": 573, "logprob": 0.0, "special": false, "text": " the" }, { "id": 11859, "logprob": -1.953125, "special": false, "text": " lab" }, { "id": 2185, "logprob": -1.7734375, "special": false, "text": " process" }, { "id": 235265, "logprob": 0.0, "special": false, "text": "." } ], "top_tokens": null }, "generated_text": "Test request forms are an essential part of the lab process." }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_gemma/test_flash_gemma_all_params.json", "repo_id": "text-generation-inference", "token_count": 849 }
304
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": 0, "tokens": [ { "id": 720, "logprob": 0.0, "special": false, "text": " \n" }, { "id": 34564, "logprob": -0.12512207, "special": false, "text": "Deep" }, { "id": 6975, "logprob": 0.0, "special": false, "text": " learning" }, { "id": 320, "logprob": -0.23840332, "special": false, "text": " (" }, { "id": 16931, "logprob": -2.0175781, "special": false, "text": "DL" }, { "id": 8, "logprob": 0.0, "special": false, "text": ")" }, { "id": 374, "logprob": -0.8613281, "special": false, "text": " is" }, { "id": 264, "logprob": 0.0, "special": false, "text": " a" }, { "id": 1207, "logprob": -1.2451172, "special": false, "text": " sub" }, { "id": 2630, "logprob": 0.0, "special": false, "text": "field" } ], "top_tokens": null }, "generated_text": "What is deep learning? \nDeep learning (DL) is a subfield" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8_kv_cache/test_flash_llama_fp8_kv_cache_all_params.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama_fp8_kv_cache/test_flash_llama_fp8_kv_cache_all_params.json", "repo_id": "text-generation-inference", "token_count": 853 }
305
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [], "seed": null, "tokens": [ { "id": 28747, "logprob": -0.54785156, "special": false, "text": ":" }, { "id": 3169, "logprob": -1.4091797, "special": false, "text": " Let" }, { "id": 307, "logprob": -3.0273438, "special": false, "text": " n" }, { "id": 327, "logprob": -0.94433594, "special": false, "text": " =" }, { "id": 28705, "logprob": -0.81347656, "special": false, "text": " " }, { "id": 28740, "logprob": -1.2958984, "special": false, "text": "1" }, { "id": 28734, "logprob": -2.0644531, "special": false, "text": "0" }, { "id": 387, "logprob": -1.9580078, "special": false, "text": " -" }, { "id": 28705, "logprob": -0.5073242, "special": false, "text": " " }, { "id": 28740, "logprob": -1.1816406, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": ": Let n = 10 - 1" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_mistral/test_flash_mistral.json", "repo_id": "text-generation-inference", "token_count": 865 }
306
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 2, "prefill": [], "seed": null, "tokens": [ { "id": 54901, "logprob": -0.84765625, "special": false, "text": "beach" }, { "id": 1, "logprob": -0.008666992, "special": true, "text": "<eos>" } ], "top_tokens": null }, "generated_text": "beach" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_pali_gemma/test_flash_pali_gemma.json", "repo_id": "text-generation-inference", "token_count": 266 }
307
{ "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "The image showcases a stunning cityscape, featuring the iconic Statue of Liberty in the foreground. The image displays Lady Liberty's imposing presence, with her towering base standing beside her. Behind the statue, the city's skyline extends across the horizon, adorned with numerous tall buildings, including the Empire State Building and other notable skyscrapers. The water reflecting the sun's rays creates a serene and picturesque scene, emphasizing the beauty and resilience of this global landmark. The sky is a clear, pale blue, adding to the overall tranquility of the scene.", "name": null, "role": "assistant", "tool_calls": null }, "usage": null } ], "created": 1738348090, "id": "", "model": "Qwen/Qwen2-VL-7B-Instruct", "object": "chat.completion", "system_fingerprint": "3.1.1-dev0-native", "usage": { "completion_tokens": 110, "prompt_tokens": 8736, "total_tokens": 8846 } }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_bay.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_qwen2_vl/test_flash_qwen2_vl_bay.json", "repo_id": "text-generation-inference", "token_count": 376 }
308
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 2, "prefill": [], "seed": null, "tokens": [ { "id": 284, "logprob": -1.1679688, "special": false, "text": "\n " }, { "id": 0, "logprob": null, "special": true, "text": "<|endoftext|>" } ], "top_tokens": null }, "generated_text": "\n " }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq.json", "repo_id": "text-generation-inference", "token_count": 259 }
309
{ "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "{\"name\":\"John Smith\",\"age\":30,\"address\":{\"street\":\"Maple Street\",\"city\":\"Boston\"},\"hobbies\":[\"botany\",\"astronomy\",\"solving mathematical puzzles\"]}", "role": "assistant" } } ], "created": 1746053373, "id": "", "model": "google/gemma-3-4b-it", "object": "chat.completion", "system_fingerprint": "3.3.4-dev0-native", "usage": { "completion_tokens": 44, "prompt_tokens": 37, "total_tokens": 81 } }
text-generation-inference/integration-tests/models/__snapshots__/test_json_schema_constrain/test_json_schema_complex.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_json_schema_constrain/test_json_schema_complex.json", "repo_id": "text-generation-inference", "token_count": 275 }
310
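The point of the JSON-schema constrained snapshot above is that the message content is guaranteed to parse as JSON. A minimal sketch of that check (hypothetical helper name):

import json

def parse_constrained(payload: dict) -> dict:
    content = payload["choices"][0]["message"]["content"]
    # json.loads raises if the grammar constraint were somehow violated.
    return json.loads(content)

# For the snapshot above: parse_constrained(snap)["address"]["city"] == "Boston"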
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 5, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": 0, "tokens": [ { "id": 926, "logprob": -4.3554688, "special": false, "text": " To" }, { "id": 18295, "logprob": -7.7734375, "special": false, "text": " sell" }, { "id": 7868, "logprob": -3.9257812, "special": false, "text": " things" }, { "id": 260, "logprob": -2.4179688, "special": false, "text": "." }, { "id": 1, "logprob": 0.0, "special": true, "text": "</s>" } ] }, "generated_text": "To sell things." }
text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mt0_base/test_mt0_base.json", "repo_id": "text-generation-inference", "token_count": 532 }
311
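Unlike the greedy snapshots, this one records `"seed": 0` (sampling) and a non-empty `prefill` holding the decoder start token `<pad>` with a null logprob, as expected for a seq2seq model. The seed is pinned because, with the same seed and the same server state, sampling should reproduce the same text, which is what makes the output snapshot-able. A hedged sketch of that reproducibility check, mirroring the client usage in the test fixtures below:

async def check_seeded_repro(client):
    # Same prompt + same seed should yield the same sample, assuming
    # identical server state and batch composition between the two calls.
    a = await client.generate("Why is the sky blue?", max_new_tokens=5, seed=0)
    b = await client.generate("Why is the sky blue?", max_new_tokens=5, seed=0)
    assert a.generated_text == b.generated_text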
{ "choices": [ { "finish_reason": "stop", "index": 0, "logprobs": null, "message": { "content": "I'm an artificial intelligence model known as a large language model (LLM) or conversational AI", "role": "assistant", "tool_calls": null } } ], "created": 1741693957, "id": "", "model": "meta-llama/Llama-3.1-8B-Instruct", "object": "chat.completion", "system_fingerprint": "3.1.2-dev0-native", "usage": { "completion_tokens": 12, "prompt_tokens": 53, "total_tokens": 65 } }
text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information_nostream.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_tools_llama/test_flash_llama_grammar_tools_insufficient_information_nostream.json", "repo_id": "text-generation-inference", "token_count": 264 }
312
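In this tools test the model lacks the information needed to call a tool, so it answers with plain content and `tool_calls` is null; a tool-using response would populate `tool_calls` instead. A minimal dispatch sketch (hypothetical helper name):

def route_tool_response(payload: dict):
    msg = payload["choices"][0]["message"]
    if msg.get("tool_calls"):
        return ("tool_calls", msg["tool_calls"])
    return ("content", msg["content"])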
import pytest


@pytest.fixture(scope="module")
def flash_gemma_handle(launcher):
    with launcher("google/gemma-2b", num_shard=1) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_gemma(flash_gemma_handle):
    await flash_gemma_handle.health(300)
    return flash_gemma_handle.client


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_simple(flash_gemma, response_snapshot):
    response = await flash_gemma.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_all_params(flash_gemma, response_snapshot):
    response = await flash_gemma.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.release
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_gemma_load(flash_gemma, generate_load, response_snapshot):
    responses = await generate_load(flash_gemma, "Test request", max_new_tokens=10, n=4)

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_gemma.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_gemma.py", "repo_id": "text-generation-inference", "token_count": 678 }
313
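The `flash_gemma.generate(...)` calls above go through the Python test client, which wraps the server's `/generate` HTTP route (the starcoder2 LoRA test further down hits that route directly with `requests`). A minimal sketch of the equivalent raw request, assuming a server is already running on the launcher's default port 3000:

import requests

resp = requests.post(
    "http://localhost:3000/generate",  # assumes the default --port 3000
    json={
        "inputs": "Test request",
        "parameters": {"max_new_tokens": 10, "details": True},
    },
)
resp.raise_for_status()
print(resp.json()["generated_text"])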
import pytest


@pytest.fixture(scope="module")
def flash_mistral_handle(launcher):
    with launcher("mistralai/Mistral-7B-Instruct-v0.1") as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_mistral(flash_mistral_handle):
    await flash_mistral_handle.health(300)
    return flash_mistral_handle.client


@pytest.mark.asyncio
async def test_flash_mistral(flash_mistral, response_snapshot):
    response = await flash_mistral.generate(
        "Test request", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response.generated_text == ": Let n = 10 - 1"
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_mistral_all_params(flash_mistral, response_snapshot):
    response = await flash_mistral.generate(
        "Test request",
        max_new_tokens=10,
        repetition_penalty=1.2,
        return_full_text=True,
        stop_sequences=["test"],
        temperature=0.5,
        top_p=0.9,
        top_k=10,
        truncate=5,
        typical_p=0.9,
        watermark=True,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_mistral_load(flash_mistral, generate_load, response_snapshot):
    responses = await generate_load(
        flash_mistral, "Test request", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all(
        [r.generated_text == responses[0].generated_text for r in responses]
    ), f"{[r.generated_text for r in responses]}"
    assert responses[0].generated_text == ": Let n = 10 - 1"
    assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_mistral.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_mistral.py", "repo_id": "text-generation-inference", "token_count": 714 }
314
import pytest
import requests


@pytest.fixture(scope="module")
def flash_starcoder2_handle(launcher):
    with launcher(
        "bigcode/starcoder2-3b", lora_adapters=["smangrul/starcoder-3b-hugcoder"]
    ) as handle:
        yield handle


@pytest.fixture(scope="module")
async def flash_starcoder2(flash_starcoder2_handle):
    await flash_starcoder2_handle.health(300)
    return flash_starcoder2_handle.client


@pytest.mark.asyncio
async def test_flash_starcoder2(flash_starcoder2, response_snapshot):
    response = await flash_starcoder2.generate(
        "def print_hello", max_new_tokens=10, decoder_input_details=True
    )

    assert response.details.generated_tokens == 10
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_starcoder2_default_params(flash_starcoder2, response_snapshot):
    response = await flash_starcoder2.generate(
        "who are you?",
        max_new_tokens=60,
        temperature=0.2,
        top_p=0.95,
        decoder_input_details=True,
        seed=0,
    )

    assert response.details.generated_tokens == 60
    assert response == response_snapshot


@pytest.mark.asyncio
async def test_flash_starcoder2_load(
    flash_starcoder2, generate_load, response_snapshot
):
    responses = await generate_load(
        flash_starcoder2, "who are you?", max_new_tokens=10, n=4
    )

    assert len(responses) == 4
    assert all([r.generated_text == responses[0].generated_text for r in responses])
    assert responses == response_snapshot


@pytest.mark.asyncio
async def test_flash_starcoder2_with_hugcode_adapter(
    flash_starcoder2, response_snapshot
):
    response = requests.post(
        f"{flash_starcoder2.base_url}/generate",
        headers=flash_starcoder2.headers,
        json={
            "inputs": "def print_hello",
            "parameters": {
                "max_new_tokens": 10,
                "adapter_id": "smangrul/starcoder-3b-hugcoder",
                "details": True,
            },
        },
    )

    assert response.status_code == 200
    data = response.json()
    assert data["generated_text"] == '_world():\n    print("Hello World!")\n'
    assert data == response_snapshot
text-generation-inference/integration-tests/models/test_flash_starcoder2_lora.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_starcoder2_lora.py", "repo_id": "text-generation-inference", "token_count": 940 }
315
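The last test above shows the per-request `adapter_id` parameter selecting a LoRA adapter that was preloaded at startup via `--lora-adapters`; omitting the parameter serves the base model. A small sketch (hypothetical helper) for diffing base versus adapter output over the same `/generate` route:

from typing import Optional

import requests

def generate(base_url: str, prompt: str, adapter_id: Optional[str] = None) -> str:
    parameters = {"max_new_tokens": 10}
    if adapter_id is not None:
        # Must be one of the adapters preloaded via --lora-adapters.
        parameters["adapter_id"] = adapter_id
    r = requests.post(f"{base_url}/generate", json={"inputs": prompt, "parameters": parameters})
    r.raise_for_status()
    return r.json()["generated_text"]

# e.g. generate(url, "def print_hello") vs.
#      generate(url, "def print_hello", "smangrul/starcoder-3b-hugcoder")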
import pytest


@pytest.fixture(scope="module")
def opt_sharded_handle(launcher):
    with launcher("facebook/opt-6.7b", num_shard=2) as handle:
        yield handle


@pytest.fixture(scope="module")
async def opt_sharded(opt_sharded_handle):
    await opt_sharded_handle.health(300)
    return opt_sharded_handle.client


@pytest.mark.release
@pytest.mark.asyncio
async def test_opt(opt_sharded):
    pass
text-generation-inference/integration-tests/models/test_opt.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_opt.py", "repo_id": "text-generation-inference", "token_count": 160 }
316
use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::ApiBuilder, Repo, RepoType}; use nix::sys::signal::{self, Signal}; use nix::unistd::Pid; use serde::Deserialize; use std::env; use std::ffi::OsString; use std::io::{BufRead, BufReader}; use std::os::unix::process::{CommandExt, ExitStatusExt}; use std::path::Path; use std::process::{Child, Command, ExitStatus, Stdio}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::TryRecvError; use std::sync::{mpsc, Arc}; use std::thread; use std::thread::sleep; use std::time::{Duration, Instant}; use std::{ fs, io, io::{Read, Write}, }; use thiserror::Error; use tracing_subscriber::{filter::LevelFilter, EnvFilter}; mod env_runtime; mod gpu; fn compute_optimal(config: Option<&Config>, compute: Option<&ComputeType>) -> Option<usize> { let config = config?; let compute = compute?; let f16_max_compute = compute.f16_flop()?; let model_compute = config.flop()?; tracing::debug!( "Max compute {} model compute {}", human_size(f16_max_compute as usize, "flop"), human_size(model_compute as usize, "flop") ); let optimal_size = (f16_max_compute / model_compute) as usize; if optimal_size > 100 { // Ignore calculations that are too low // Most likely an error Some(optimal_size) } else { None } } fn human_size(size: usize, suffix: &str) -> String { let mut size: f64 = size as f64; let mut p = ""; for prefix in ["", "K", "M", "G", "T"] { p = prefix; if size > 1_000.0 { size /= 1_000.0; } else { break; } } format!("{size:.2}{p}{suffix}") } fn vram_maximum( config: Option<&Config>, compute: Option<&ComputeType>, memory_fraction: f32, ) -> Option<usize> { let config = config?; let compute = compute?; let available = compute.vram(memory_fraction)?; let model = config.model_vram()?; let token_vram = config.token_vram()?; if let Some(vram) = available.checked_sub(model) { let tokens_allowed = vram / token_vram; tracing::debug!( "Available vram {}: model needs {}, each token requires {}, maximum allocatable tokens {tokens_allowed}", human_size(available, "B"), human_size(model, "B"), human_size(token_vram, "B"), ); Some(tokens_allowed) } else { tracing::warn!( "Not enough VRAM to run the model: Available: {} - Model {}.", human_size(available, "B"), human_size(model, "B") ); None } } fn get_config( model_id: &str, revision: &Option<String>, ) -> Result<Config, Box<dyn std::error::Error>> { let mut path = std::path::Path::new(model_id).to_path_buf(); let model_id = model_id.to_string(); let filename = if !path.exists() { // Assume it's a hub id let mut builder = ApiBuilder::from_env(); if let Ok(token) = std::env::var("HF_TOKEN") { // The env variable takes precedence over the on-file token. builder = builder.with_token(Some(token)) }; if let Ok(origin) = env::var("HF_HUB_USER_AGENT_ORIGIN") { builder = builder.with_user_agent("origin", origin.as_str()); } let api = builder.build()?; let repo = if let Some(ref revision) = revision { api.repo(Repo::with_revision( model_id, RepoType::Model, revision.to_string(), )) } else { api.model(model_id) }; repo.get("config.json")?
} else { path.push("config.json"); path }; let content = std::fs::read_to_string(filename)?; let config: RawConfig = serde_json::from_str(&content)?; let config: Config = config.into(); Ok(config) } fn resolve_attention(config: &Option<Config>, lora_adapters: &Option<String>) -> (String, String) { let compute_capability = gpu::get_cuda_capability(); let mut prefix_caching: Option<String> = std::env::var("PREFIX_CACHING").ok(); let mut attention: Option<String> = std::env::var("ATTENTION").ok(); if let Some(config) = config { if prefix_caching.is_none() { if config.vision_config.is_some() { tracing::info!("Disabling prefix caching because of VLM model"); prefix_caching = Some("0".to_string()); } else if config.is_encoder_decoder { tracing::info!("Disabling prefix caching because of seq2seq model"); prefix_caching = Some("0".to_string()); } } let fallback_attention = if compute_capability.is_none() || matches!(compute_capability, Some((major, _)) if major < 8) { "paged" } else { "flashdecoding" }; match config.get_head_dim() { Some(h) if h == 64 || h == 128 || h == 256 => { if lora_adapters.is_some() && prefix_caching.is_none() { tracing::info!("Disabling prefix caching because of lora adapters"); prefix_caching = Some("0".to_string()); } match config.model_type.as_deref() { Some("falcon") | Some("deepseek_v2") => { // Required because gemma2 needs bfloat16 which is not supported by // flashinfer ? if attention.is_none() { tracing::info!( "Forcing attention to '{fallback_attention}' because model {} requires it", config.model_type.as_ref().unwrap() ); attention = Some(fallback_attention.to_string()); } if fallback_attention == "paged" && prefix_caching.is_none() { tracing::info!("Disabling prefix caching because it is not supported with 'paged' attention"); prefix_caching = Some("0".to_string()); } } Some("t5") => {} _ => {} } } _ => { if attention.is_none() { tracing::info!("Forcing attention to '{fallback_attention}' because head dim is not supported by flashinfer, also disabling prefix caching"); attention = Some(fallback_attention.to_string()); } if prefix_caching.is_none() { prefix_caching = Some("0".to_string()); } } } } if attention == Some("paged".to_string()) && prefix_caching.is_none() { tracing::info!("Disabling prefix caching on paged attention"); prefix_caching = Some("0".to_string()); } let attention = attention.unwrap_or("flashinfer".to_string()); let prefix_caching = prefix_caching.unwrap_or("true".to_string()); (prefix_caching, attention) } #[derive(Deserialize)] struct RawConfig { max_position_embeddings: Option<usize>, n_positions: Option<usize>, model_type: Option<String>, max_seq_len: Option<usize>, quantization_config: Option<QuantizationConfig>, n_embd: Option<usize>, hidden_size: Option<usize>, intermediate_size: Option<usize>, num_attention_heads: Option<usize>, num_key_value_heads: Option<usize>, num_hidden_layers: Option<usize>, head_dim: Option<usize>, text_config: Option<TextConfig>, vision_config: Option<VisionConfig>, is_encoder_decoder: Option<bool>, #[serde(rename = "num_experts_per_tok")] num_experts_per_token: Option<usize>, #[serde(rename = "n_shared_experts")] num_shared_experts: Option<usize>, #[serde(rename = "num_local_experts")] num_experts: Option<usize>, vocab_size: Option<usize>, } #[derive(Deserialize)] struct QuantizationConfig { quant_method: Option<Quantization>, } #[derive(Debug, Deserialize)] struct VisionConfig {} #[derive(Debug, Deserialize)] struct TextConfig { head_dim: Option<usize>, } #[derive(Debug, Deserialize)] struct Config { 
max_position_embeddings: Option<usize>, quantize: Option<Quantization>, head_dim: Option<usize>, num_heads: Option<usize>, num_kv_heads: Option<usize>, num_layers: Option<usize>, intermediate_size: Option<usize>, hidden_size: Option<usize>, model_type: Option<String>, text_config: Option<TextConfig>, vision_config: Option<VisionConfig>, is_encoder_decoder: bool, num_experts_per_token: usize, num_shared_experts: usize, num_experts: usize, vocab_size: Option<usize>, } impl Config { fn get_head_dim(&self) -> Option<usize> { if let Some(head_dim) = self.head_dim { return Some(head_dim); } let text_config = self.text_config.as_ref()?; if let Some(head_size) = text_config.head_dim { return Some(head_size); } match self.model_type.as_deref() { // We special-case gemma3 here, since we need flashinfer for // handling bidirectional masks. And flashinfer can only be // used when the head size is known. Some("gemma3") => Some(256), _ => None, } } fn flop(&self) -> Option<u64> { if self.vision_config.is_some() { // VLMs are much harder to predict and their VRAM requirements // are more complex. return None; } let num_heads = self.num_heads? as u64; let num_kv_heads = self.num_kv_heads? as u64; let head_dim = self.get_head_dim()? as u64; let hidden_size = self.hidden_size? as u64; let intermediate_size = (self.intermediate_size? * (self.num_experts_per_token + self.num_shared_experts)) as u64; let num_layers = self.num_layers? as u64; let q_flops = 2 * num_heads * head_dim * hidden_size; let k_flops = 2 * num_kv_heads * head_dim * hidden_size; let v_flops = 2 * num_kv_heads * head_dim * hidden_size; let attn_flops = 2 * num_heads * head_dim * hidden_size; let o_flops = 2 * num_heads * head_dim * hidden_size; let attn_layer_flops = q_flops + k_flops + v_flops + attn_flops + o_flops; let gate_up_down_flops = 2 * 3 * hidden_size * intermediate_size; let layer_flops = attn_layer_flops + gate_up_down_flops; let total = layer_flops * num_layers; Some(total) } fn kv_vram_per_tok(&self) -> Option<usize> { if self.quantize.is_some() { // TODO handle quantization return None; } // 2 for keys and values // 2 bytes for the f16 dtype Some(self.num_kv_heads? * 2 * self.get_head_dim()? * 2 * self.num_layers?) } fn mlp_vram_per_tok(&self) -> Option<usize> { // TODO handle quantization // TODO This calculation depends on the actual implementation let dtype_size = 2; let mlp_size = self.intermediate_size?; // calculation is overshooting here. // Coming from here: https://github.com/vllm-project/vllm/blob/d1c2e15eb31ef12e688ce0cb71895f88eaf4cd4f/vllm/model_executor/layers/fused_moe/fused_moe.py#L618-L624 Some((mlp_size + mlp_size / 2) * self.num_experts * dtype_size * 3) } fn token_vram(&self) -> Option<usize> { let kv = self.kv_vram_per_tok()?; let mlp_intermediary = self.mlp_vram_per_tok()?; let per_tok = kv + mlp_intermediary; Some(per_tok) } fn model_vram(&self) -> Option<usize> { let attn_vram = (self.num_heads? + 2 * self.num_kv_heads?) * self.get_head_dim()?; let o_vram = self.num_heads? * self.get_head_dim()? * self.hidden_size?; // gate + up + down = 3 let mlp_vram = 3 * self.intermediate_size? * self.num_experts * self.hidden_size?; let layer_vram = mlp_vram + attn_vram + o_vram; let vocab = self.hidden_size? * self.vocab_size?; let params = layer_vram * self.num_layers?
+ 2 * vocab; let dtype_size = 2; if self.quantize.is_some() { // TODO handle quantization return None; } Some(params * dtype_size) } } impl From<RawConfig> for Config { fn from(other: RawConfig) -> Self { let max_position_embeddings = other .max_position_embeddings .or(other.max_seq_len) .or(other.n_positions); let quantize = other.quantization_config.and_then(|q| q.quant_method); let hidden_size = other.hidden_size.or(other.n_embd); let head_dim = other .head_dim .or_else(|| match (hidden_size, other.num_attention_heads) { (Some(hidden_size), Some(num_attention_heads)) if hidden_size % num_attention_heads == 0 => { Some(hidden_size / num_attention_heads) } _ => None, }); let num_heads = other.num_attention_heads; let num_layers = other.num_hidden_layers; let num_kv_heads = other.num_key_value_heads.or(other.num_attention_heads); let intermediate_size = other.intermediate_size; let model_type = other.model_type; let text_config = other.text_config; let vision_config = other.vision_config; let is_encoder_decoder = other.is_encoder_decoder.unwrap_or(false); let num_experts_per_token = other.num_experts_per_token.unwrap_or(1); let num_shared_experts = other.num_shared_experts.unwrap_or(0); let num_experts = other.num_experts.unwrap_or(1); let vocab_size = other.vocab_size; Config { max_position_embeddings, quantize, head_dim, model_type, text_config, vision_config, is_encoder_decoder, hidden_size, num_heads, num_kv_heads, intermediate_size, num_layers, num_experts_per_token, num_shared_experts, num_experts, vocab_size, } } } #[derive(Clone, Copy, Debug, ValueEnum, Deserialize)] #[serde(rename_all = "kebab-case")] enum Quantization { /// 4 bit quantization. Requires a specific AWQ quantized model: /// <https://hf.co/models?search=awq>. /// Should replace GPTQ models wherever possible because of the better latency Awq, /// Compressed tensors, which can be a mixture of different quantization methods. CompressedTensors, /// 8 bit quantization, doesn't require a specific model. /// Should be a drop-in replacement for bitsandbytes with much better performance. /// Kernels are from <https://github.com/NetEase-FuXi/EETQ.git> Eetq, /// Variable bit quantization. Requires a specific EXL2 quantized model: /// <https://hf.co/models?search=exl2>. Requires exllama2 kernels and does /// not support tensor parallelism (num_shard > 1). Exl2, /// 4 bit quantization. Requires a specific GPTQ quantized model: <https://hf.co/models?search=gptq>. /// text-generation-inference will use exllama (faster) kernels wherever possible, and use /// triton kernel (wider support) when it's not. /// AWQ has faster kernels. Gptq, /// 4 bit quantization. Requires a specific Marlin quantized model: <https://hf.co/models?search=marlin>. Marlin, /// Bitsandbytes 8bit. Can be applied on any model, will cut the memory requirement in half, /// but it is known that the model will be much slower to run than the native f16. // #[deprecated( // since = "1.1.0", // note = "Use `eetq` instead, which provides better latencies overall and is drop-in in most cases" // )] Bitsandbytes, /// Bitsandbytes 4bit. Can be applied on any model, will cut the memory requirement by 4x, /// but it is known that the model will be much slower to run than the native f16. BitsandbytesNf4, /// Bitsandbytes 4bit.
nf4 should be preferred in most cases but maybe this one has better /// perplexity performance for your model BitsandbytesFp4, /// [FP8](https://developer.nvidia.com/blog/nvidia-arm-and-intel-publish-fp8-specification-for-standardization-as-an-interchange-format-for-ai/) (e4m3) works on H100 and above /// This dtype has native ops and should be the fastest if available. /// This is currently not the fastest because of local unpacking + padding to satisfy matrix /// multiplication limitations. Fp8, } impl std::fmt::Display for Quantization { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in sync with `server`. match self { #[allow(deprecated)] // Use `eetq` instead, which provides better latencies overall and is drop-in in most cases Quantization::Bitsandbytes => { write!(f, "bitsandbytes") } Quantization::BitsandbytesNf4 => { write!(f, "bitsandbytes-nf4") } Quantization::BitsandbytesFp4 => { write!(f, "bitsandbytes-fp4") } Quantization::Exl2 => { write!(f, "exl2") } Quantization::Gptq => { write!(f, "gptq") } Quantization::Marlin => { write!(f, "marlin") } Quantization::Awq => { write!(f, "awq") } Quantization::CompressedTensors => { write!(f, "compressed-tensors") } Quantization::Eetq => { write!(f, "eetq") } Quantization::Fp8 => { write!(f, "fp8") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] enum Dtype { Float16, #[clap(name = "bfloat16")] BFloat16, } impl std::fmt::Display for Dtype { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in sync with `server`. match self { Dtype::Float16 => { write!(f, "float16") } Dtype::BFloat16 => { write!(f, "bfloat16") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] enum KVCacheDtype { #[clap(name = "fp8_e4m3fn")] Fp8e4m3fn, #[clap(name = "fp8_e5m2")] Fp8e5m2, } impl std::fmt::Display for KVCacheDtype { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { KVCacheDtype::Fp8e4m3fn => { write!(f, "fp8_e4m3fn") } KVCacheDtype::Fp8e5m2 => { write!(f, "fp8_e5m2") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] enum RopeScaling { Linear, Dynamic, } impl std::fmt::Display for RopeScaling { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in sync with `server`. match self { RopeScaling::Linear => { write!(f, "linear") } RopeScaling::Dynamic => { write!(f, "dynamic") } } } } #[derive(Clone, Copy, Debug, ValueEnum)] pub enum UsageStatsLevel { /// Default option, usage statistics are collected anonymously On, /// Disables all collection of usage statistics Off, /// Doesn't send the error stack trace or error type, but allows sending a crash event NoStack, } impl std::fmt::Display for UsageStatsLevel { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // To keep in sync with `server`. match self { UsageStatsLevel::On => { write!(f, "on") } UsageStatsLevel::Off => { write!(f, "off") } UsageStatsLevel::NoStack => { write!(f, "no-stack") } } } } /// App Configuration #[derive(Parser, Debug)] #[clap(author, version, about, long_about = None)] struct Args { /// The name of the model to load. /// Can be a MODEL_ID as listed on <https://hf.co/models> like /// `gpt2` or `OpenAssistant/oasst-sft-1-pythia-12b`. /// Or it can be a local directory containing the necessary files /// as saved by `save_pretrained(...)` methods of transformers #[clap(default_value = "bigscience/bloom-560m", long, env)] model_id: String, /// The actual revision of the model if you're referring to a model /// on the hub.
You can use a specific commit id or a branch like `refs/pr/2`. #[clap(long, env)] revision: Option<String>, /// The number of tokenizer workers used for payload validation and truncation inside the /// router. #[clap(default_value = "2", long, env)] validation_workers: usize, /// Whether to shard the model across multiple GPUs /// By default text-generation-inference will use all available GPUs to run /// the model. Setting it to `false` deactivates `num_shard`. #[clap(long, env)] sharded: Option<bool>, /// The number of shards to use if you don't want to use all GPUs on a given machine. /// You can use `CUDA_VISIBLE_DEVICES=0,1 text-generation-launcher... --num_shard 2` /// and `CUDA_VISIBLE_DEVICES=2,3 text-generation-launcher... --num_shard 2` to /// launch 2 copies with 2 shards each on a given machine with 4 GPUs for instance. #[clap(long, env)] num_shard: Option<usize>, /// Quantization method to use for the model. It is not necessary to specify this option /// for pre-quantized models, since the quantization method is read from the model /// configuration. /// /// Marlin kernels will be used automatically for GPTQ/AWQ models. #[clap(long, env, value_enum)] quantize: Option<Quantization>, /// The number of input_ids to speculate on /// If using a medusa model, the heads will be picked up automatically /// Otherwise, it will use n-gram speculation which is relatively free /// in terms of compute, but the speedup heavily depends on the task. #[clap(long, env)] speculate: Option<usize>, /// The dtype to be forced upon the model. This option cannot be used with `--quantize`. #[clap(long, env, value_enum)] dtype: Option<Dtype>, /// Specify the dtype for the key-value cache. When this option is not provided, /// the dtype of the model is used (typically `float16` or `bfloat16`). Currently /// the only supported values are `fp8_e4m3fn` and `fp8_e5m2` on CUDA. #[clap(long, env, value_enum)] kv_cache_dtype: Option<KVCacheDtype>, /// Whether you want to execute hub modelling code. Explicitly passing a `revision` is /// encouraged when loading a model with custom code to ensure no malicious code has been /// contributed in a newer revision. #[clap(long, env, value_enum)] trust_remote_code: bool, /// The maximum amount of concurrent requests for this particular deployment. /// Having a low limit will refuse client requests instead of having them /// wait for too long and is usually good to handle backpressure correctly. #[clap(default_value = "128", long, env)] max_concurrent_requests: usize, /// This is the maximum allowed value for clients to set `best_of`. /// Best of makes `n` generations at the same time, and returns the best /// in terms of overall log probability over the entire generated sequence #[clap(default_value = "2", long, env)] max_best_of: usize, /// This is the maximum allowed value for clients to set `stop_sequences`. /// Stop sequences are used to allow the model to stop on more than just /// the EOS token, and enable more complex "prompting" where users can preprompt /// the model in a specific way and define their "own" stop token aligned with /// their prompt. #[clap(default_value = "4", long, env)] max_stop_sequences: usize, /// This is the maximum allowed value for clients to set `top_n_tokens`. /// `top_n_tokens` is used to return information about the `n` most likely /// tokens at each generation step, instead of just the sampled token. This /// information can be used for downstream tasks like classification or /// ranking.
#[clap(default_value = "5", long, env)] max_top_n_tokens: u32, /// This is the maximum allowed input length (expressed in number of tokens) /// for users. The larger this value, the longer prompt users can send which /// can impact the overall memory required to handle the load. /// Please note that some models have a finite range of sequence they can handle. /// Default to min(max_allocatable, max_position_embeddings) - 1 #[clap(long, env)] max_input_tokens: Option<usize>, /// Legacy version of [`Args::max_input_tokens`]. #[clap(long, env)] max_input_length: Option<usize>, /// This is the most important value to set as it defines the "memory budget" /// of running clients requests. /// Clients will send input sequences and ask to generate `max_new_tokens` /// on top. with a value of `1512` users can send either a prompt of /// `1000` and ask for `512` new tokens, or send a prompt of `1` and ask for /// `1511` max_new_tokens. /// The larger this value, the larger amount each request will be in your RAM /// and the less effective batching can be. /// Default to min(max_allocatable, max_position_embeddings) #[clap(long, env)] max_total_tokens: Option<usize>, /// This represents the ratio of waiting queries vs running queries where /// you want to start considering pausing the running queries to include the waiting /// ones into the same batch. /// `waiting_served_ratio=1.2` Means when 12 queries are waiting and there's /// only 10 queries left in the current batch we check if we can fit those 12 /// waiting queries into the batching strategy, and if yes, then batching happens /// delaying the 10 running queries by a `prefill` run. /// /// This setting is only applied if there is room in the batch /// as defined by `max_batch_total_tokens`. #[clap(default_value = "0.3", long, env)] waiting_served_ratio: f32, /// Limits the number of tokens for the prefill operation. /// Since this operation take the most memory and is compute bound, it is interesting /// to limit the number of requests that can be sent. /// Default to `max_input_tokens + 50` to give a bit of room. #[clap(long, env)] max_batch_prefill_tokens: Option<u32>, /// **IMPORTANT** This is one critical control to allow maximum usage /// of the available hardware. /// /// This represents the total amount of potential tokens within a batch. /// When using padding (not recommended) this would be equivalent of /// `batch_size` * `max_total_tokens`. /// /// However in the non-padded (flash attention) version this can be much finer. /// /// For `max_batch_total_tokens=1000`, you could fit `10` queries of `total_tokens=100` /// or a single query of `1000` tokens. /// /// Overall this number should be the largest possible amount that fits the /// remaining memory (after the model is loaded). Since the actual memory overhead /// depends on other parameters like if you're using quantization, flash attention /// or the model implementation, text-generation-inference infers this number automatically /// if not provided ensuring that the value is as large as possible. #[clap(long, env)] max_batch_total_tokens: Option<u32>, /// This setting defines how many tokens can be passed before forcing the waiting /// queries to be put on the batch (if the size of the batch allows for it). /// New queries require 1 `prefill` forward, which is different from `decode` /// and therefore you need to pause the running batch in order to run `prefill` /// to create the correct values for the waiting queries to be able to join the batch. 
/// /// With a value too small, queries will always "steal" the compute to run `prefill` /// and running queries will be delayed by a lot. /// /// With a value too big, waiting queries could wait for a very long time /// before being allowed a slot in the running batch. If your server is busy /// that means that requests that could run in ~2s on an empty server could /// end up running in ~20s because the query had to wait for 18s. /// /// This number is expressed in number of tokens to make it a bit more /// "model" agnostic, but what should really matter is the overall latency /// for end users. #[clap(default_value = "20", long, env)] max_waiting_tokens: usize, /// Enforce a maximum number of requests per batch /// Specific flag for hardware targets that do not support unpadded inference #[clap(long, env)] max_batch_size: Option<usize>, /// Specify the batch sizes to compute cuda graphs for. /// Use "0" to disable. /// Default = "1,2,4,8,16,32" #[clap(long, env, value_delimiter = ',')] cuda_graphs: Option<Vec<usize>>, /// The IP address to listen on #[clap(default_value = "0.0.0.0", long, env)] hostname: String, /// The port to listen on. #[clap(default_value = "3000", long, short, env)] port: u16, /// The Prometheus port to listen on. #[clap(default_value = "9000", long, short, env)] prometheus_port: u16, /// The name of the socket for gRPC communication between the webserver /// and the shards. #[clap(default_value = "/tmp/text-generation-server", long, env)] shard_uds_path: String, /// The address the master shard will listen on. (setting used by torch distributed) #[clap(default_value = "localhost", long, env)] master_addr: String, /// The port the master shard will listen on. (setting used by torch distributed) #[clap(default_value = "29500", long, env)] master_port: usize, /// The location of the huggingface hub cache. /// Used to override the location if you want to provide a mounted disk for instance #[clap(long, env)] huggingface_hub_cache: Option<String>, /// The location of the weights cache. /// Used to override where model weights are looked up, for instance when providing a mounted disk #[clap(long, env)] weights_cache_override: Option<String>, /// For some models (like bloom), text-generation-inference implemented custom /// cuda kernels to speed up inference. Those kernels were only tested on A100. /// Use this flag to disable them if you're running on different hardware and /// encounter issues. #[clap(long, env)] disable_custom_kernels: bool, /// Limit the CUDA available memory. /// The allowed value equals the total visible memory multiplied by cuda-memory-fraction. #[clap(default_value = "1.0", long, env)] cuda_memory_fraction: f32, /// Rope scaling will only be used for RoPE models /// and allows rescaling the position rotary embeddings to accommodate /// larger prompts. /// /// Goes together with `rope_factor`.
/// /// `--rope-factor 2.0` gives linear scaling with a factor of 2.0 /// `--rope-scaling dynamic` gives dynamic scaling with a factor of 1.0 /// `--rope-scaling linear` gives linear scaling with a factor of 1.0 (nothing will be changed, /// basically) /// /// `--rope-scaling linear --rope-factor` fully describes the scaling you want #[clap(long, env)] rope_scaling: Option<RopeScaling>, /// Rope scaling will only be used for RoPE models /// See `rope_scaling` #[clap(long, env)] rope_factor: Option<f32>, /// Outputs the logs in JSON format (useful for telemetry) #[clap(long, env)] json_output: bool, #[clap(long, env)] otlp_endpoint: Option<String>, #[clap(default_value = "text-generation-inference.router", long, env)] otlp_service_name: String, #[clap(long, env)] cors_allow_origin: Vec<String>, #[clap(long, env)] api_key: Option<String>, #[clap(long, env)] watermark_gamma: Option<f32>, #[clap(long, env)] watermark_delta: Option<f32>, /// Enable ngrok tunneling #[clap(long, env)] ngrok: bool, /// ngrok authentication token #[clap(long, env)] ngrok_authtoken: Option<String>, /// ngrok edge #[clap(long, env)] ngrok_edge: Option<String>, /// The path to the tokenizer config file. This path is used to load the tokenizer configuration which may /// include a `chat_template`. If not provided, the default config will be used from the model hub. #[clap(long, env)] tokenizer_config_path: Option<String>, /// Disable outlines grammar constrained generation. /// This is a feature that allows you to generate text that follows a specific grammar. #[clap(long, env)] disable_grammar_support: bool, /// Display a lot of information about your runtime environment #[clap(long, short, action)] env: bool, /// Control the maximum number of inputs that a client can send in a single request #[clap(default_value = "4", long, env)] max_client_batch_size: usize, /// Lora Adapters: a list of adapter ids, i.e. `repo/adapter1,repo/adapter2`, to load during /// startup that will be available to callers via the `adapter_id` field in a request. #[clap(long, env)] lora_adapters: Option<String>, /// Control if anonymous usage stats are collected. /// Options are "on", "off" and "no-stack". /// Default is on. #[clap(default_value = "on", long, env)] usage_stats: UsageStatsLevel, /// Payload size limit in bytes /// /// Default is 2MB #[clap(default_value = "2000000", long, env)] payload_limit: usize, /// Enables prefill logprobs /// /// Logprobs in the prompt are deactivated by default because they consume /// a large amount of VRAM (especially for long prompts). /// Using this flag allows users to ask for them again.
#[clap(long, env)] enable_prefill_logprobs: bool, /// Change timeout of graceful termination of the TGI server #[clap(default_value = "90", long, short, env)] graceful_termination_timeout: u64, } #[derive(Debug)] enum ShardStatus { Ready, Failed(usize), } #[allow(clippy::too_many_arguments)] fn shard_manager( model_id: String, revision: Option<String>, quantize: Option<Quantization>, speculate: Option<usize>, dtype: Option<Dtype>, kv_cache_dtype: Option<KVCacheDtype>, trust_remote_code: bool, uds_path: String, rank: usize, world_size: usize, master_addr: String, master_port: usize, huggingface_hub_cache: Option<String>, weights_cache_override: Option<String>, disable_custom_kernels: bool, watermark_gamma: Option<f32>, watermark_delta: Option<f32>, cuda_graphs: Vec<usize>, cuda_memory_fraction: f32, rope_scaling: Option<RopeScaling>, rope_factor: Option<f32>, max_total_tokens: Option<usize>, max_batch_size: Option<usize>, max_input_tokens: Option<usize>, lora_adapters: Option<String>, enable_prefill_logprobs: bool, otlp_endpoint: Option<String>, otlp_service_name: String, log_level: LevelFilter, status_sender: mpsc::Sender<ShardStatus>, shutdown: Arc<AtomicBool>, graceful_termination_timeout: u64, _shutdown_sender: mpsc::Sender<()>, ) { // Enter shard-manager tracing span let _span = tracing::span!(tracing::Level::INFO, "shard-manager", rank = rank).entered(); // Get UDS path let uds_string = format!("{uds_path}-{rank}"); let uds = Path::new(&uds_string); // Clean previous runs if uds.exists() { fs::remove_file(uds).unwrap(); } // Process args let mut shard_args = vec![ "serve".to_string(), model_id, "--uds-path".to_string(), uds_path, "--logger-level".to_string(), log_level.to_string().to_uppercase(), "--json-output".to_string(), ]; // Activate trust remote code if trust_remote_code { shard_args.push("--trust-remote-code".to_string()); } // Activate tensor parallelism if world_size > 1 { shard_args.push("--sharded".to_string()); } if let Some(quantize) = quantize { shard_args.push("--quantize".to_string()); shard_args.push(quantize.to_string()) } if let Some(speculate) = speculate { shard_args.push("--speculate".to_string()); shard_args.push(speculate.to_string()) } if let Some(dtype) = dtype { shard_args.push("--dtype".to_string()); shard_args.push(dtype.to_string()) } if let Some(kv_cache_dtype) = kv_cache_dtype { shard_args.push("--kv-cache-dtype".to_string()); shard_args.push(kv_cache_dtype.to_string()) } // Model optional revision if let Some(revision) = revision { shard_args.push("--revision".to_string()); shard_args.push(revision) } let rope = match (rope_scaling, rope_factor) { (None, None) => None, (Some(scaling), None) => Some((scaling, 1.0)), (Some(scaling), Some(factor)) => Some((scaling, factor)), (None, Some(factor)) => Some((RopeScaling::Linear, factor)), }; // OpenTelemetry Endpoint if let Some(otlp_endpoint) = otlp_endpoint { shard_args.push("--otlp-endpoint".to_string()); shard_args.push(otlp_endpoint); } // OpenTelemetry Service Name shard_args.push("--otlp-service-name".to_string()); shard_args.push(otlp_service_name); // In case we use sliding window, we may ignore the sliding in flash for some backends depending on the parameter. 
if let Some(max_input_tokens) = max_input_tokens { shard_args.push("--max-input-tokens".to_string()); shard_args.push(max_input_tokens.to_string()); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Remove LOG_LEVEL if present envs.retain(|(name, _)| name != "LOG_LEVEL"); // Torch Distributed Env vars envs.push(("RANK".into(), rank.to_string().into())); envs.push(("WORLD_SIZE".into(), world_size.to_string().into())); envs.push(("MASTER_ADDR".into(), master_addr.into())); envs.push(("MASTER_PORT".into(), master_port.to_string().into())); envs.push(("TORCH_NCCL_AVOID_RECORD_STREAMS".into(), "1".into())); // CUDA memory fraction envs.push(( "CUDA_MEMORY_FRACTION".into(), cuda_memory_fraction.to_string().into(), )); // Safetensors load fast envs.push(("SAFETENSORS_FAST_GPU".into(), "1".into())); // Disable progress bar envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into())); // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( "HF_HUB_ENABLE_HF_TRANSFER".into(), enable_hf_transfer.into(), )); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HF_TOKEN".into(), api_token.into())) }; // Detect rope scaling // Sent as env vars instead of CLI args to avoid bloating everything: // these can only be used by RoPE models, so passing the information around // for all models would complicate the code unnecessarily if let Some((scaling, factor)) = rope { envs.push(("ROPE_SCALING".into(), scaling.to_string().into())); envs.push(("ROPE_FACTOR".into(), factor.to_string().into())); } if let Some(max_total_tokens) = max_total_tokens { envs.push(( "MAX_TOTAL_TOKENS".into(), max_total_tokens.to_string().into(), )); } if let Some(max_batch_size) = max_batch_size { envs.push(("MAX_BATCH_SIZE".into(), max_batch_size.to_string().into())); } // Lora Adapters if let Some(lora_adapters) = lora_adapters { envs.push(("LORA_ADAPTERS".into(), lora_adapters.into())); } // Logprobs if enable_prefill_logprobs { envs.push(("REQUEST_LOGPROBS".into(), "1".into())); } // If huggingface_hub_cache is some, pass it to the shard // Useful when running inside a docker container if let Some(huggingface_hub_cache) = huggingface_hub_cache { envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into())); }; // If weights_cache_override is some, pass it to the shard // Useful when running inside a HuggingFace Inference Endpoint if let Some(weights_cache_override) = weights_cache_override { envs.push(( "WEIGHTS_CACHE_OVERRIDE".into(), weights_cache_override.into(), )); }; // Enable experimental support for cuda graphs if !cuda_graphs.is_empty() { envs.push(( "CUDA_GRAPHS".into(), cuda_graphs .into_iter() .map(|c| c.to_string()) .collect::<Vec<_>>() .join(",") .into(), )); } // If disable_custom_kernels is true, pass it to the shard as an env var if disable_custom_kernels { envs.push(("DISABLE_CUSTOM_KERNELS".into(), "True".into())) } // Watermark Gamma if let Some(watermark_gamma) = watermark_gamma { envs.push(("WATERMARK_GAMMA".into(), watermark_gamma.to_string().into())) } // Watermark Delta if let Some(watermark_delta) = watermark_delta { envs.push(("WATERMARK_DELTA".into(), watermark_delta.to_string().into())) } // Start process tracing::info!("Starting shard"); let mut p = match Command::new("text-generation-server") .args(shard_args) .env_clear() .envs(envs) .stdin(Stdio::piped()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0)
.spawn() { Ok(p) => p, Err(err) => { if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-server not found in PATH"); tracing::error!("Please install it with `make install-server`") } else { tracing::error!("{}", err); } status_sender.send(ShardStatus::Failed(rank)).unwrap(); return; } }; // Redirect STDOUT to the console let mut pstdin = p.stdin.take().unwrap(); let shard_stdout_reader = BufReader::new(p.stdout.take().unwrap()); let shard_stderr_reader = BufReader::new(p.stderr.take().unwrap()); // stdout tracing thread thread::spawn(move || { log_lines(shard_stdout_reader); }); // We read stderr in another thread as it seems that lines() can block in some cases let (err_sender, err_receiver) = mpsc::channel(); thread::spawn(move || { for line in shard_stderr_reader.lines().map_while(Result::ok) { err_sender.send(line).unwrap_or(()); } }); // Forward our stdin to the shard process in another thread (used for interactive debugging) if LevelFilter::current() >= tracing::Level::DEBUG { thread::spawn(move || { let mut stdin = io::stdin(); // We get `Stdin` here. loop { let mut buffer = vec![0; 4096]; if let Ok(n) = stdin.read(&mut buffer) { if n > 0 { let _ = pstdin.write_all(&buffer[..n]); } } } }); } let mut ready = false; let start_time = Instant::now(); let mut wait_time = Instant::now(); loop { // Process exited if let Some(exit_status) = p.try_wait().unwrap() { let mut err = String::new(); while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { err = err + "\n" + &line; } tracing::error!("Shard complete standard error output:\n{err}"); if let Some(signal) = exit_status.signal() { tracing::error!("Shard process was signaled to shutdown with signal {signal}"); } status_sender.send(ShardStatus::Failed(rank)).unwrap(); return; } // We received a shutdown signal if shutdown.load(Ordering::SeqCst) { terminate( "shard", p, Duration::from_secs(graceful_termination_timeout), ) .unwrap(); return; } // Shard is ready if uds.exists() && !ready { tracing::info!("Shard ready in {:?}", start_time.elapsed()); status_sender.send(ShardStatus::Ready).unwrap(); ready = true; } else if !ready && wait_time.elapsed() > Duration::from_secs(10) { tracing::info!("Waiting for shard to be ready..."); wait_time = Instant::now(); } sleep(Duration::from_millis(100)); } } fn shutdown_shards(shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>) { tracing::info!("Shutting down shards"); // Update shutdown value to true // This will be picked up by the shard manager shutdown.store(true, Ordering::SeqCst); // Wait for shards to shutdown // This will block till all shutdown_sender are dropped let _ = shutdown_receiver.recv(); } fn num_cuda_devices() -> Option<usize> { let devices = match env::var("CUDA_VISIBLE_DEVICES") { Ok(devices) => devices, Err(_) => match env::var("NVIDIA_VISIBLE_DEVICES") { Ok(devices) => { if devices.trim() == "all" { // Count the number of all GPUs via nvidia-smi let output = Command::new("nvidia-smi") .args(["--query-gpu=uuid", "--format=csv,noheader"]) .output() .ok()?; String::from_utf8_lossy(&output.stdout) .lines() .filter(|line| !line.trim().is_empty()) .count() .to_string() } else { devices } } Err(_) => env::var("ZE_AFFINITY_MASK").ok()?, }, }; let n_devices = devices.split(',').count(); Some(n_devices) } #[derive(Deserialize)] #[serde(rename_all = "UPPERCASE")] enum PythonLogLevelEnum { Trace, Debug, Info, Success, Warning, Error, Critical, } #[derive(Deserialize)] struct PythonLogLevel { name: PythonLogLevelEnum, } #[derive(Deserialize)] struct
PythonLogRecord { level: PythonLogLevel, } #[derive(Deserialize)] struct PythonLogMessage { text: String, record: PythonLogRecord, } impl PythonLogMessage { fn trace(&self) { match self.record.level.name { PythonLogLevelEnum::Trace => tracing::trace!("{}", self.text.trim_end()), PythonLogLevelEnum::Debug => tracing::debug!("{}", self.text.trim_end()), PythonLogLevelEnum::Info => tracing::info!("{}", self.text.trim_end()), PythonLogLevelEnum::Success => tracing::info!("{}", self.text.trim_end()), PythonLogLevelEnum::Warning => tracing::warn!("{}", self.text.trim_end()), PythonLogLevelEnum::Error => tracing::error!("{}", self.text.trim_end()), PythonLogLevelEnum::Critical => tracing::error!("{}", self.text.trim_end()), } } } impl TryFrom<&[u8]> for PythonLogMessage { type Error = serde_json::Error; fn try_from(value: &[u8]) -> Result<Self, Self::Error> { serde_json::from_slice::<Self>(value) } } fn log_lines<R: Sized + Read>(mut bufread: BufReader<R>) { let mut buffer = vec![0u8; 8 * 4096]; let mut stdout = std::io::stdout(); loop { let n = bufread.read(&mut buffer); if let Ok(n) = n { if n > 0 { let mut lines = buffer[..n].split(|i| *i == b'\n').peekable(); while let Some(line) = lines.next() { match PythonLogMessage::try_from(line) { Ok(log) => log.trace(), // For interactive debugging ? Err(_) => { if LevelFilter::current() >= tracing::Level::DEBUG { stdout.write_all(line).unwrap(); if lines.peek().is_some() { stdout.write_all(b"\n").unwrap(); } stdout.flush().unwrap(); } } } } } else { break; } } } } fn find_num_shards( sharded: Option<bool>, num_shard: Option<usize>, ) -> Result<usize, LauncherError> { // get the number of shards given `sharded` and `num_shard` let num_shard = match (sharded, num_shard) { (Some(true), None) => { // try to default to the number of available GPUs tracing::info!("Parsing num_shard from CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES/ZE_AFFINITY_MASK"); let n_devices = num_cuda_devices() .expect("--num-shard and CUDA_VISIBLE_DEVICES/NVIDIA_VISIBLE_DEVICES/ZE_AFFINITY_MASK are not set"); if n_devices <= 1 { return Err(LauncherError::NotEnoughCUDADevices(format!( "`sharded` is true but only found {n_devices} CUDA devices" ))); } n_devices } (Some(true), Some(num_shard)) => { // we can't have only one shard while sharded if num_shard <= 1 { return Err(LauncherError::ArgumentValidation( "`sharded` is true but `num_shard` <= 1".to_string(), )); } num_shard } (Some(false), Some(num_shard)) => num_shard, (Some(false), None) => 1, (None, None) => num_cuda_devices().unwrap_or(1), (None, Some(num_shard)) => num_shard, }; if num_shard < 1 { return Err(LauncherError::ArgumentValidation( "`num_shard` cannot be < 1".to_string(), )); } Ok(num_shard) } #[derive(Debug, Error)] enum LauncherError { #[error("Invalid argument: {0}")] ArgumentValidation(String), #[error("not enough cuda devices: {0}")] NotEnoughCUDADevices(String), #[error("Download error")] DownloadError, #[error("Shard cannot start")] ShardCannotStart, #[error("Shard disconnected")] ShardDisconnected, #[error("Shard failed")] ShardFailed, #[error("Webserver failed")] WebserverFailed, #[error("Webserver cannot start")] WebserverCannotStart, } fn download_convert_model( model_id: &str, revision: Option<&str>, trust_remote_code: bool, huggingface_hub_cache: Option<&str>, weights_cache_override: Option<&str>, running: Arc<AtomicBool>, merge_lora: bool, ) -> Result<(), LauncherError> { // Enter download tracing span let _span = tracing::span!(tracing::Level::INFO, "download").entered(); let mut download_args = vec![ 
"download-weights".to_string(), model_id.to_string(), "--extension".to_string(), ".safetensors".to_string(), "--logger-level".to_string(), "INFO".to_string(), "--json-output".to_string(), ]; if merge_lora { download_args.push("--merge-lora".to_string()); } // Model optional revision if let Some(revision) = &revision { download_args.push("--revision".to_string()); download_args.push(revision.to_string()) } // Trust remote code for automatic peft fusion if trust_remote_code { download_args.push("--trust-remote-code".to_string()); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Remove LOG_LEVEL if present envs.retain(|(name, _)| name != "LOG_LEVEL"); // Disable progress bar envs.push(("HF_HUB_DISABLE_PROGRESS_BARS".into(), "1".into())); // If huggingface_hub_cache is set, pass it to the download process // Useful when running inside a docker container if let Some(ref huggingface_hub_cache) = huggingface_hub_cache { envs.push(("HUGGINGFACE_HUB_CACHE".into(), huggingface_hub_cache.into())); }; // Enable hf transfer for insane download speeds let enable_hf_transfer = env::var("HF_HUB_ENABLE_HF_TRANSFER").unwrap_or("1".to_string()); envs.push(( "HF_HUB_ENABLE_HF_TRANSFER".into(), enable_hf_transfer.into(), )); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HF_TOKEN".into(), api_token.into())) }; // If args.weights_cache_override is some, pass it to the download process // Useful when running inside a HuggingFace Inference Endpoint if let Some(weights_cache_override) = &weights_cache_override { envs.push(( "WEIGHTS_CACHE_OVERRIDE".into(), weights_cache_override.into(), )); }; // Start process tracing::info!("Starting check and download process for {model_id}"); let mut download_process = match Command::new("text-generation-server") .args(download_args) .env_clear() .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-server not found in PATH"); tracing::error!("Please install it with `make install-server`") } else { tracing::error!("{}", err); } return Err(LauncherError::DownloadError); } }; let download_stdout = BufReader::new(download_process.stdout.take().unwrap()); thread::spawn(move || { log_lines(download_stdout); }); let download_stderr = BufReader::new(download_process.stderr.take().unwrap()); // We read stderr in another thread as it seems that lines() can block in some cases let (err_sender, err_receiver) = mpsc::channel(); thread::spawn(move || { for line in download_stderr.lines().map_while(Result::ok) { err_sender.send(line).unwrap_or(()); } }); loop { if let Some(status) = download_process.try_wait().unwrap() { if status.success() { tracing::info!("Successfully downloaded weights for {model_id}"); break; } let mut err = String::new(); while let Ok(line) = err_receiver.recv_timeout(Duration::from_millis(10)) { err = err + "\n" + &line; } if let Some(signal) = status.signal() { tracing::error!( "Download process was signaled to shutdown with signal {signal}: {err}" ); } else { tracing::error!("Download encountered an error: {err}"); } return Err(LauncherError::DownloadError); } if !running.load(Ordering::SeqCst) { terminate("download", download_process, Duration::from_secs(10)).unwrap(); return Ok(()); } sleep(Duration::from_millis(100)); } Ok(()) } #[allow(clippy::too_many_arguments)] fn spawn_shards( num_shard: usize, args: &Args, cuda_graphs: 
Vec<usize>, max_total_tokens: Option<usize>, max_input_tokens: Option<usize>, quantize: Option<Quantization>, max_log_level: LevelFilter, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, shutdown_sender: mpsc::Sender<()>, status_receiver: &mpsc::Receiver<ShardStatus>, status_sender: mpsc::Sender<ShardStatus>, running: Arc<AtomicBool>, graceful_termination_timeout: u64, ) -> Result<(), LauncherError> { // Start shard processes for rank in 0..num_shard { let model_id = args.model_id.clone(); let revision = args.revision.clone(); let uds_path = args.shard_uds_path.clone(); let master_addr = args.master_addr.clone(); let huggingface_hub_cache = args.huggingface_hub_cache.clone(); let weights_cache_override = args.weights_cache_override.clone(); let status_sender = status_sender.clone(); let shutdown = shutdown.clone(); let shutdown_sender = shutdown_sender.clone(); let otlp_endpoint = args.otlp_endpoint.clone(); let otlp_service_name = args.otlp_service_name.clone(); let speculate = args.speculate; let dtype = args.dtype; let kv_cache_dtype = args.kv_cache_dtype; let trust_remote_code = args.trust_remote_code; let master_port = args.master_port; let disable_custom_kernels = args.disable_custom_kernels; let watermark_gamma = args.watermark_gamma; let watermark_delta = args.watermark_delta; let cuda_graphs_clone = cuda_graphs.clone(); let cuda_memory_fraction = args.cuda_memory_fraction; let rope_scaling = args.rope_scaling; let rope_factor = args.rope_factor; let max_batch_size = args.max_batch_size; let lora_adapters = args.lora_adapters.clone(); let enable_prefill_logprobs = args.enable_prefill_logprobs; thread::spawn(move || { shard_manager( model_id, revision, quantize, speculate, dtype, kv_cache_dtype, trust_remote_code, uds_path, rank, num_shard, master_addr, master_port, huggingface_hub_cache, weights_cache_override, disable_custom_kernels, watermark_gamma, watermark_delta, cuda_graphs_clone, cuda_memory_fraction, rope_scaling, rope_factor, max_total_tokens, max_batch_size, max_input_tokens, lora_adapters, enable_prefill_logprobs, otlp_endpoint, otlp_service_name, max_log_level, status_sender, shutdown, graceful_termination_timeout, shutdown_sender, ) }); } drop(shutdown_sender); // Wait for shard to start let mut shard_ready = 0; while running.load(Ordering::SeqCst) { match status_receiver.try_recv() { Ok(ShardStatus::Ready) => { shard_ready += 1; if shard_ready == num_shard { break; } } Err(TryRecvError::Empty) => { sleep(Duration::from_millis(100)); } Ok(ShardStatus::Failed(rank)) => { tracing::error!("Shard {rank} failed to start"); shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::ShardCannotStart); } Err(TryRecvError::Disconnected) => { tracing::error!("Shard status channel disconnected"); shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::ShardDisconnected); } } } Ok(()) } #[derive(Debug)] enum Gpu { RTX4090, T4, L4, L40, L40S, A10G, A40, H100, A100, H200, Unknown(String), } #[derive(Debug)] struct ComputeType { count: usize, card: Gpu, } impl From<&str> for Gpu { fn from(value: &str) -> Self { match value { "nvidia-4090" => Gpu::RTX4090, "nvidia-t4" => Gpu::T4, "nvidia-l4" => Gpu::L4, "nvidia-l40" => Gpu::L40, "nvidia-l40s" => Gpu::L40S, "nvidia-a10g" => Gpu::A10G, "nvidia-a40" => Gpu::A40, "nvidia-h100-80gb-hbm3" => Gpu::H100, "nvidia-h100-nvl" => Gpu::H100, "nvidia-h100" => Gpu::H100, "nvidia-a100-sxm4-80gb" => Gpu::A100, "nvidia-a100-sxm4-40gb" => Gpu::A100, "nvidia-a100-80gb-pcie" => Gpu::A100, "nvidia-a100" => 
Gpu::A100, "nvidia-h200" => Gpu::H200, card => Gpu::Unknown(card.to_string()), } } } impl std::fmt::Display for Gpu { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Gpu::RTX4090 => write!(f, "nvidia-4090"), Gpu::T4 => write!(f, "nvidia-t4"), Gpu::L4 => write!(f, "nvidia-l4"), Gpu::L40 => write!(f, "nvidia-l40"), Gpu::L40S => write!(f, "nvidia-l40s"), Gpu::A10G => write!(f, "nvidia-a10g"), Gpu::A40 => write!(f, "nvidia-a40"), Gpu::H100 => write!(f, "nvidia-h100-80fb-hbm3"), Gpu::A100 => write!(f, "nvidia-a100-sxm4-80gb"), Gpu::H200 => write!(f, "nvidia-h200"), Gpu::Unknown(card) => write!(f, "{}", card), } } } impl ComputeType { fn f16_flop(&self) -> Option<u64> { let card_flop = match &self.card { // https://www.nvidia.com/en-us/geforce/graphics-cards/40-series/rtx-4090/ // Specs are unclear https://www.itcreations.com/nvidia-gpu/nvidia-geforce-rtx-4090-gpu Gpu::RTX4090 => Some(82 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/tesla-t4/ Gpu::T4 => Some(65 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/l4/ Gpu::L4 => Some(121 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/l40/ Gpu::L40 => Some(181 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/l40s/ Gpu::L40S => Some(363 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/products/a10-gpu/ Gpu::A10G => Some(125 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/a40/ // https://images.nvidia.com/content/Solutions/data-center/a40/nvidia-a40-datasheet.pdf Gpu::A40 => Some(149 * 10u64.pow(12)), // https://www.nvidia.com/content/dam/en-zz/Solutions/Data-Center/a100/pdf/nvidia-a100-datasheet-us-nvidia-1758950-r4-web.pdf Gpu::A100 => Some(312 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/h100/ // https://www.techpowerup.com/gpu-specs/docs/nvidia-gh100-architecture.pdf Gpu::H100 => Some(900 * 10u64.pow(12)), // https://www.nvidia.com/en-us/data-center/h200/ Gpu::H200 => Some(989 * 10u64.pow(12)), Gpu::Unknown(card) => { tracing::warn!("Unkown compute for card {card}"); None } }; card_flop.map(|f| f * self.count as u64) } fn vram(&self, memory_fraction: f32) -> Option<usize> { let output = Command::new("nvidia-smi") .args(["--query-gpu=memory.total", "--format=csv"]) .output() .ok()?; let output = String::from_utf8(output.stdout).ok()?; let fullname = output.split('\n').nth(1)?; let mut tokens = fullname.split(' '); let amount = tokens.next()?; let unit = tokens.next()?; if unit != "MiB" { tracing::warn!("Unexpected memory unit {unit}, expected MiB"); return None; } let amount: usize = amount.parse().ok()?; let amount = amount * 2usize.pow(20); let wiggle_room: f32 = env::var("TGI_WIGGLE_ROOM") .ok() .and_then(|wiggle| wiggle.parse().ok()) .unwrap_or(0.95); let total = amount * self.count; let adjusted = ((total as f32) * memory_fraction * wiggle_room) as usize; Some(adjusted) } } impl From<ComputeType> for OsString { fn from(value: ComputeType) -> Self { format!("{}-{}", value.count, value.card).into() } } fn compute_type(count: usize) -> Option<ComputeType> { let output = Command::new("nvidia-smi") .args(["--query-gpu=gpu_name", "--format=csv"]) .output() .ok()?; let output = String::from_utf8(output.stdout).ok()?; let fullname = output.split('\n').nth(1)?; let cardname = fullname.replace(' ', "-").to_lowercase(); let card = (&*cardname).into(); Some(ComputeType { count, card }) } fn spawn_webserver( num_shard: usize, args: Args, max_input_tokens: Option<usize>, max_total_tokens: Option<usize>, 
max_batch_prefill_tokens: u32, shutdown: Arc<AtomicBool>, shutdown_receiver: &mpsc::Receiver<()>, ) -> Result<Child, LauncherError> { // All shards started // Start webserver tracing::info!("Starting Webserver"); let mut router_args = vec![ "--max-client-batch-size".to_string(), args.max_client_batch_size.to_string(), "--max-concurrent-requests".to_string(), args.max_concurrent_requests.to_string(), "--max-best-of".to_string(), args.max_best_of.to_string(), "--max-stop-sequences".to_string(), args.max_stop_sequences.to_string(), "--max-top-n-tokens".to_string(), args.max_top_n_tokens.to_string(), "--max-batch-prefill-tokens".to_string(), max_batch_prefill_tokens.to_string(), "--waiting-served-ratio".to_string(), args.waiting_served_ratio.to_string(), "--max-waiting-tokens".to_string(), args.max_waiting_tokens.to_string(), "--validation-workers".to_string(), args.validation_workers.to_string(), "--hostname".to_string(), args.hostname.to_string(), "--port".to_string(), args.port.to_string(), "--prometheus-port".to_string(), args.prometheus_port.to_string(), "--master-shard-uds-path".to_string(), format!("{}-0", args.shard_uds_path), "--tokenizer-name".to_string(), args.model_id, "--payload-limit".to_string(), args.payload_limit.to_string(), ]; if let Some(max_input_tokens) = max_input_tokens { router_args.extend_from_slice(&[ "--max-input-tokens".to_string(), max_input_tokens.to_string(), ]); } if let Some(max_total_tokens) = max_total_tokens { router_args.extend_from_slice(&[ "--max-total-tokens".to_string(), max_total_tokens.to_string(), ]); } // Pass usage stats flags to router router_args.push("--usage-stats".to_string()); router_args.push(args.usage_stats.to_string()); // Grammar support if args.disable_grammar_support { router_args.push("--disable-grammar-support".to_string()); } // Tokenizer config path if let Some(ref tokenizer_config_path) = args.tokenizer_config_path { router_args.push("--tokenizer-config-path".to_string()); router_args.push(tokenizer_config_path.to_string()); } // Model optional max batch total tokens if let Some(max_batch_total_tokens) = args.max_batch_total_tokens { router_args.push("--max-batch-total-tokens".to_string()); router_args.push(max_batch_total_tokens.to_string()); } // Router optional max batch size if let Some(max_batch_size) = args.max_batch_size { router_args.push("--max-batch-size".to_string()); router_args.push(max_batch_size.to_string()); } // Model optional revision if let Some(ref revision) = args.revision { router_args.push("--revision".to_string()); router_args.push(revision.to_string()) } if args.trust_remote_code { router_args.push("--trust-remote-code".to_string()); } if args.json_output { router_args.push("--json-output".to_string()); } // OpenTelemetry if let Some(otlp_endpoint) = args.otlp_endpoint { router_args.push("--otlp-endpoint".to_string()); router_args.push(otlp_endpoint); } // OpenTelemetry let otlp_service_name = args.otlp_service_name; router_args.push("--otlp-service-name".to_string()); router_args.push(otlp_service_name); // CORS origins for origin in args.cors_allow_origin.into_iter() { router_args.push("--cors-allow-origin".to_string()); router_args.push(origin); } // API Key if let Some(api_key) = args.api_key { router_args.push("--api-key".to_string()); router_args.push(api_key); } // Ngrok if args.ngrok { router_args.push("--ngrok".to_string()); router_args.push("--ngrok-authtoken".to_string()); router_args.push(args.ngrok_authtoken.unwrap()); router_args.push("--ngrok-edge".to_string());
router_args.push(args.ngrok_edge.unwrap()); } // Copy current process env let mut envs: Vec<(OsString, OsString)> = env::vars_os().collect(); // Parse Inference API token if let Ok(api_token) = env::var("HF_API_TOKEN") { envs.push(("HF_TOKEN".into(), api_token.into())) }; // Parse Compute type if let Ok(compute_type) = env::var("COMPUTE_TYPE") { envs.push(("COMPUTE_TYPE".into(), compute_type.into())) } else if let Some(compute_type) = compute_type(num_shard) { envs.push(("COMPUTE_TYPE".into(), compute_type.into())) } let mut webserver = match Command::new("text-generation-router") .args(router_args) .envs(envs) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .process_group(0) .spawn() { Ok(p) => p, Err(err) => { tracing::error!("Failed to start webserver: {}", err); if err.kind() == io::ErrorKind::NotFound { tracing::error!("text-generation-router not found in PATH"); tracing::error!("Please install it with `make install-router`") } else { tracing::error!("{}", err); } shutdown_shards(shutdown, shutdown_receiver); return Err(LauncherError::WebserverCannotStart); } }; // Redirect STDOUT and STDERR to the console let webserver_stdout = webserver.stdout.take().unwrap(); let webserver_stderr = webserver.stderr.take().unwrap(); thread::spawn(move || { let stdout = BufReader::new(webserver_stdout); let stderr = BufReader::new(webserver_stderr); for line in stdout.lines() { println!("{}", line.unwrap()); } for line in stderr.lines() { println!("{}", line.unwrap()); } }); Ok(webserver) } fn terminate(process_name: &str, mut process: Child, timeout: Duration) -> io::Result<ExitStatus> { tracing::info!("Terminating {process_name}"); let terminate_time = Instant::now(); signal::kill(Pid::from_raw(process.id() as i32), Signal::SIGTERM).unwrap(); tracing::info!("Waiting for {process_name} to gracefully shut down"); while terminate_time.elapsed() < timeout { if let Some(status) = process.try_wait()? { tracing::info!("{process_name} terminated"); return Ok(status); } sleep(Duration::from_millis(100)); } tracing::info!("Killing {process_name}"); process.kill()?; let exit_status = process.wait()?; tracing::info!("{process_name} killed"); Ok(exit_status) } fn main() -> Result<(), LauncherError> { // Pattern match configuration let args: Args = Args::parse(); let graceful_termination_timeout = args.graceful_termination_timeout; // Filter events with LOG_LEVEL let varname = "LOG_LEVEL"; let env_filter = if let Ok(log_level) = std::env::var(varname) { // Override so that simple logs are not spammed with tokio-level information let log_level = match &log_level[..] { "warn" => "text_generation_launcher=warn,text_generation_router=warn", "info" => "text_generation_launcher=info,text_generation_router=info", "debug" => "text_generation_launcher=debug,text_generation_router=debug", log_level => log_level, }; EnvFilter::builder() .with_default_directive(LevelFilter::INFO.into()) .parse_lossy(log_level) } else { EnvFilter::new("info") }; let max_log_level = env_filter.max_level_hint().unwrap_or(LevelFilter::INFO); if args.json_output { tracing_subscriber::fmt() .with_env_filter(env_filter) .json() .init(); } else { tracing_subscriber::fmt() .with_env_filter(env_filter) .compact() .init(); } if args.env { let env_runtime = env_runtime::Env::new(); tracing::info!("{}", env_runtime); } tracing::info!("{:#?}", args); let config: Option<Config> = get_config(&args.model_id, &args.revision).ok(); let quantize = config.as_ref().and_then(|c| c.quantize); // Quantization usually means you're even more RAM constrained.
let (prefix_caching, attention) = resolve_attention(&config, &args.lora_adapters); tracing::info!("Using attention {attention} - Prefix caching {prefix_caching}"); std::env::set_var("PREFIX_CACHING", prefix_caching); std::env::set_var("ATTENTION", attention); let num_shard = find_num_shards(args.sharded, args.num_shard)?; if num_shard > 1 { if matches!(args.quantize, Some(Quantization::Exl2)) { return Err(LauncherError::ArgumentValidation( "Sharding is currently not supported with `exl2` quantization".into(), )); } tracing::info!("Sharding model on {num_shard} processes"); } let max_input_tokens = { match (args.max_input_tokens, args.max_input_length) { (Some(max_input_tokens), Some(max_input_length)) => { return Err(LauncherError::ArgumentValidation( format!("Both `max_input_tokens` ({max_input_tokens}) and `max_input_length` ({max_input_length}) are set. Please define only `max_input_tokens` as `max_input_length` is deprecated for naming consistency.", ))); } (Some(max_input_tokens), None) | (None, Some(max_input_tokens)) => { Some(max_input_tokens) } (None, None) => None, } }; let max_total_tokens = args.max_total_tokens; let max_batch_prefill_tokens = { match args.max_batch_prefill_tokens { Some(max_batch_prefill_tokens) => max_batch_prefill_tokens, None => { let compute_type = compute_type(num_shard); let compute_optimal = compute_optimal(config.as_ref(), compute_type.as_ref()); // TODO: remove this when we correctly estimate the flops for VLMs // this is a short-term temporary fix to enable vlms to avoid rejecting images let default_optimal = match config { Some(ref config) => match config.model_type.as_deref() { Some("qwen2_vl") | Some("qwen2_5_vl") => 10_000, Some("gemma3") => 8000, _ => 4096, }, None => 4096, }; let default = compute_optimal.unwrap_or(default_optimal); let vram_maximum = vram_maximum( config.as_ref(), compute_type.as_ref(), args.cuda_memory_fraction, ); let max_position_embeddings = config.and_then(|c| c.max_position_embeddings); let value = if let Some(max_position_embeddings) = max_position_embeddings { default.min(max_position_embeddings) } else { default }; let value = if let Some(vram_maximum) = vram_maximum { if vram_maximum < value { tracing::warn!("Reducing the max batch prefill from {default} to {vram_maximum} because there is not enough VRAM to support it."); } value.min(vram_maximum) } else { value }; tracing::info!("Default `max_batch_prefill_tokens` to {value}"); value as u32 } } }; // Validate args if let (Some(max_input_tokens), Some(max_total_tokens)) = (max_input_tokens, max_total_tokens) { if max_input_tokens >= max_total_tokens { return Err(LauncherError::ArgumentValidation( format!("`max_input_tokens`({max_input_tokens}) must be < `max_total_tokens`({max_total_tokens})"), )); } } if matches!(args.quantize, Some(Quantization::Bitsandbytes)) { tracing::warn!("Bitsandbytes is deprecated, use `eetq` instead, which provides better latencies overall and is drop-in in most cases."); } let quantize = args.quantize.or(quantize); let cuda_graphs = match (&args.cuda_graphs, &quantize) { (Some(cuda_graphs), _) => cuda_graphs.iter().cloned().filter(|&c| c > 0).collect(), #[allow(deprecated)] (None, Some(Quantization::Bitsandbytes)) => { tracing::warn!("Bitsandbytes doesn't work with cuda graphs, deactivating them"); vec![] } (None, Some(Quantization::Exl2)) => { tracing::warn!("Exl2 doesn't work with cuda graphs, deactivating them"); vec![] } _ => { let cuda_graphs = vec![1, 2, 4, 8, 16, 32]; tracing::info!("Using default cuda graphs {cuda_graphs:?}");
cuda_graphs } }; if args.validation_workers == 0 { return Err(LauncherError::ArgumentValidation( "`validation_workers` must be > 0".to_string(), )); } if args.trust_remote_code { tracing::warn!( "`trust_remote_code` is set. Trusting that model `{}` does not contain malicious code.", args.model_id ); } if let Some(ref max_batch_total_tokens) = args.max_batch_total_tokens { if let Some(max_total_tokens) = max_total_tokens { if max_total_tokens as u32 > *max_batch_total_tokens { return Err(LauncherError::ArgumentValidation(format!( "`max_total_tokens` must be <= `max_batch_total_tokens`. Given: {} and {}", max_total_tokens, max_batch_total_tokens ))); } } } if args.ngrok { if args.ngrok_authtoken.is_none() { return Err(LauncherError::ArgumentValidation( "`ngrok-authtoken` must be set when using ngrok tunneling".to_string(), )); } if args.ngrok_edge.is_none() { return Err(LauncherError::ArgumentValidation( "`ngrok-edge` must be set when using ngrok tunneling".to_string(), )); } } // Signal handler let running = Arc::new(AtomicBool::new(true)); let r = running.clone(); ctrlc::set_handler(move || { r.store(false, Ordering::SeqCst); }) .expect("Error setting Ctrl-C handler"); // Download and convert model weights download_convert_model( &args.model_id, args.revision.as_deref(), args.trust_remote_code, args.huggingface_hub_cache.as_deref(), args.weights_cache_override.as_deref(), running.clone(), true, // if it's only a LoRA model - we should merge the LoRA adapters )?; // Download and convert lora adapters if any if let Some(lora_adapters) = &args.lora_adapters { for adapter in lora_adapters.split(',') { // skip download if a path is provided if adapter.contains('=') { continue; } let adapter = adapter.trim(); // check if adapter has more than 1 '@' if adapter.matches('@').count() > 1 { return Err(LauncherError::ArgumentValidation(format!( "Invalid LoRA adapter format: {}", adapter ))); } // capture adapter_id, path, revision in format of adapter_id=path@revision // paths (containing '=') were skipped beforehand.
let mut splits = adapter.split("@"); let adapter_id = splits.next().ok_or_else(|| { LauncherError::ArgumentValidation("Missing adapter id".to_string()) })?; let revision = splits.next(); download_convert_model( adapter_id, revision, args.trust_remote_code, args.huggingface_hub_cache.as_deref(), args.weights_cache_override.as_deref(), running.clone(), false, // avoid merging lora adapters if using multi-lora )?; } } if !running.load(Ordering::SeqCst) { // Launcher was asked to stop return Ok(()); } // Shared shutdown bool let shutdown = Arc::new(AtomicBool::new(false)); // Shared shutdown channel // When shutting down, the main thread will wait for all senders to be dropped let (shutdown_sender, shutdown_receiver) = mpsc::channel(); // Shared channel to track shard status let (status_sender, status_receiver) = mpsc::channel(); spawn_shards( num_shard, &args, cuda_graphs, max_total_tokens, max_input_tokens, quantize, max_log_level, shutdown.clone(), &shutdown_receiver, shutdown_sender, &status_receiver, status_sender, running.clone(), graceful_termination_timeout, )?; // We might have received a termination signal if !running.load(Ordering::SeqCst) { shutdown_shards(shutdown, &shutdown_receiver); return Ok(()); } let mut webserver = spawn_webserver( num_shard, args, max_input_tokens, max_total_tokens, max_batch_prefill_tokens, shutdown.clone(), &shutdown_receiver, ) .inspect_err(|_| { shutdown_shards(shutdown.clone(), &shutdown_receiver); })?; // Default exit code let mut exit_code = Ok(()); while running.load(Ordering::SeqCst) { if let Ok(ShardStatus::Failed(rank)) = status_receiver.try_recv() { tracing::error!("Shard {rank} crashed"); exit_code = Err(LauncherError::ShardFailed); break; }; match webserver.try_wait().unwrap() { Some(_) => { tracing::error!("Webserver Crashed"); shutdown_shards(shutdown, &shutdown_receiver); return Err(LauncherError::WebserverFailed); } None => { sleep(Duration::from_millis(100)); } }; } // Graceful termination terminate( "webserver", webserver, Duration::from_secs(graceful_termination_timeout), ) .unwrap(); shutdown_shards(shutdown, &shutdown_receiver); exit_code }
text-generation-inference/launcher/src/main.rs/0
{ "file_path": "text-generation-inference/launcher/src/main.rs", "repo_id": "text-generation-inference", "token_count": 38271 }
317
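The launcher entry above leans on one recurring pattern: spawn a child process with piped output, drain its logs from background threads, and poll it with try_wait so a shared shutdown flag can interrupt the wait and escalate from SIGTERM to a hard kill. A minimal Python sketch of that poll-and-terminate loop; the command, timeout, and the running flag are illustrative placeholders, not the launcher's real values.

import signal
import subprocess
import threading
import time

def run_with_shutdown(cmd, running, poll_interval=0.1, grace=10.0):
    # Spawn the child with piped output, mirroring Command::new(...).spawn().
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
    )

    # Drain both pipes in background threads so a full pipe never blocks the child.
    def pump(stream):
        for line in stream:
            print(line, end="")

    threading.Thread(target=pump, args=(proc.stdout,), daemon=True).start()
    threading.Thread(target=pump, args=(proc.stderr,), daemon=True).start()

    # Poll instead of blocking in wait() so the shutdown flag can interrupt us.
    while proc.poll() is None:
        if not running["flag"]:
            proc.send_signal(signal.SIGTERM)  # graceful shutdown first
            try:
                return proc.wait(timeout=grace)
            except subprocess.TimeoutExpired:
                proc.kill()  # then a hard kill, like terminate() above
                return proc.wait()
        time.sleep(poll_interval)
    return proc.returncode

A signal handler would flip running["flag"] to False, mirroring the launcher's ctrlc handler storing false into its shared AtomicBool.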
{ nix-filter, buildPythonPackage, poetry-core, mypy-protobuf, awq-inference-engine, causal-conv1d, compressed-tensors, einops, exllamav2, flashinfer, flash-attn, flash-attn-layer-norm, flash-attn-v1, grpc-interceptor, grpcio-reflection, grpcio-status, grpcio-tools, hf-transfer, hf-xet, kernels, loguru, mamba-ssm, moe, opentelemetry-api, opentelemetry-exporter-otlp, opentelemetry-instrumentation-grpc, opentelemetry-semantic-conventions, outlines, paged-attention, peft, pillow, prometheus-client, punica-sgmv, py-cpuinfo, pydantic, quantization, quantization-eetq, rotary, safetensors, tokenizers, torch, sentencepiece, transformers, typer, }: let filter = nix-filter.lib; in buildPythonPackage { name = "text-generation-server"; src = filter { root = ../.; include = with filter; [ isDirectory (and (inDirectory "server") (or_ (matchExt "py") (matchExt "pyi"))) "server/pyproject.toml" (and (inDirectory "proto/v3") (matchExt "proto")) ]; }; pyproject = true; build-system = [ poetry-core ]; nativeBuildInputs = [ mypy-protobuf ]; pythonRelaxDeps = [ "einops" "huggingface-hub" "loguru" "opentelemetry-instrumentation-grpc" "pillow" "sentencepiece" "typer" ]; pythonRemoveDeps = [ "scipy" ]; dependencies = [ awq-inference-engine causal-conv1d compressed-tensors einops exllamav2 flashinfer flash-attn flash-attn-layer-norm grpc-interceptor grpcio-reflection grpcio-status grpcio-tools hf-transfer hf-xet kernels loguru mamba-ssm moe opentelemetry-api opentelemetry-exporter-otlp opentelemetry-instrumentation-grpc opentelemetry-semantic-conventions outlines paged-attention peft pillow prometheus-client punica-sgmv py-cpuinfo pydantic quantization quantization-eetq rotary safetensors sentencepiece tokenizers transformers typer ]; prePatch = '' python -m grpc_tools.protoc -Iproto/v3 --python_out=server/text_generation_server/pb \ --grpc_python_out=server/text_generation_server/pb --mypy_out=server/text_generation_server/pb proto/v3/generate.proto find server/text_generation_server/pb/ -type f -name "*.py" -print0 -exec sed -i -e 's/^\(import.*pb2\)/from . \1/g' {} \; touch server/text_generation_server/pb/__init__.py cd server ''; }
text-generation-inference/nix/server.nix/0
{ "file_path": "text-generation-inference/nix/server.nix", "repo_id": "text-generation-inference", "token_count": 1107 }
318
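The prePatch hook in this Nix expression regenerates the Python gRPC stubs from proto/v3/generate.proto before building the server package. Assuming the same repository layout, the protoc invocation can be reproduced from Python with grpcio-tools; the mypy stubs are omitted here because they need the mypy-protobuf plugin on PATH.

from grpc_tools import protoc

# Equivalent of the `python -m grpc_tools.protoc ...` call in prePatch
# (minus --mypy_out, which requires the protoc-gen-mypy plugin).
ret = protoc.main([
    "grpc_tools.protoc",
    "-Iproto/v3",
    "--python_out=server/text_generation_server/pb",
    "--grpc_python_out=server/text_generation_server/pb",
    "proto/v3/generate.proto",
])
assert ret == 0, "protoc failed"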
use crate::config::Config; use clap::ValueEnum; use csv::ReaderBuilder; use reqwest::header::HeaderMap; use serde::Serialize; use std::{ fs::File, io::{self, BufRead}, path::Path, process::Command, time::Duration, }; use uuid::Uuid; const TELEMETRY_URL: &str = "https://huggingface.co/api/telemetry/tgi"; #[derive(Copy, Clone, Debug, Serialize, ValueEnum)] pub enum UsageStatsLevel { On, NoStack, Off, } #[derive(Debug, Clone, Serialize)] pub struct UserAgent { pub uid: String, pub args: Args, pub env: Env, } impl UserAgent { pub fn new(reduced_args: Args) -> Self { Self { uid: Uuid::new_v4().to_string(), args: reduced_args, env: Env::new(), } } } #[derive(Serialize, Debug)] pub enum EventType { Start, Stop, Error, Ping, } #[derive(Debug, Serialize)] pub struct UsageStatsEvent { user_agent: UserAgent, event_type: EventType, #[serde(skip_serializing_if = "Option::is_none")] error_reason: Option<String>, } impl UsageStatsEvent { pub fn new(user_agent: UserAgent, event_type: EventType, error_reason: Option<String>) -> Self { Self { user_agent, event_type, error_reason, } } pub async fn send(&self) { let mut headers = HeaderMap::new(); headers.insert("Content-Type", "application/json".parse().unwrap()); let body = serde_json::to_string(&self).unwrap(); let client = reqwest::Client::new(); let _ = client .post(TELEMETRY_URL) .headers(headers) .body(body) .timeout(Duration::from_secs(10)) .send() .await; } } #[derive(Debug, Clone, Serialize)] pub struct Args { model_config: Option<Config>, tokenizer_class: Option<String>, max_concurrent_requests: usize, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_tokens: usize, max_total_tokens: usize, // waiting_served_ratio: f32, // max_batch_prefill_tokens: u32, // max_batch_total_tokens: Option<u32>, // max_waiting_tokens: usize, // max_batch_size: Option<usize>, revision: Option<String>, validation_workers: usize, disable_grammar_support: bool, max_client_batch_size: usize, usage_stats_level: UsageStatsLevel, backend_name: &'static str, origin: Option<String>, } impl Args { #[allow(clippy::too_many_arguments)] pub fn new( model_config: Option<Config>, tokenizer_class: Option<String>, max_concurrent_requests: usize, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_tokens: usize, max_total_tokens: usize, // waiting_served_ratio: f32, // max_batch_prefill_tokens: u32, // max_batch_total_tokens: Option<u32>, // max_waiting_tokens: usize, // max_batch_size: Option<usize>, revision: Option<String>, validation_workers: usize, disable_grammar_support: bool, max_client_batch_size: usize, usage_stats_level: UsageStatsLevel, backend_name: &'static str, origin: Option<String>, ) -> Self { Self { model_config, tokenizer_class, max_concurrent_requests, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_tokens, max_total_tokens, // waiting_served_ratio, // max_batch_prefill_tokens, // max_batch_total_tokens, // max_waiting_tokens, // max_batch_size, revision, validation_workers, disable_grammar_support, max_client_batch_size, usage_stats_level, backend_name, origin, } } } /// This is more or less a copy of the code from the `text-generation-launcher` crate to avoid a dependency #[derive(Serialize, Debug, Clone)] pub struct Env { git_sha: &'static str, docker_label: &'static str, nvidia_info: Option<Vec<NvidiaSmiInfo>>, xpu_info: Option<Vec<XpuSmiInfo>>, hpu_info: Option<Vec<HpuSmiInfo>>, system_env: SystemInfo, } #[derive(Debug, Serialize, Clone)] struct NvidiaSmiInfo { name: String, pci_bus_id: 
String, driver_version: String, pstate: String, pcie_link_gen_max: String, pcie_link_gen_current: String, temperature_gpu: String, utilization_gpu: String, utilization_memory: String, memory_total: String, memory_free: String, memory_used: String, reset_status_reset_required: String, reset_status_drain_and_reset_recommended: String, compute_cap: String, ecc_errors_corrected_volatile_total: String, mig_mode_current: String, power_draw_instant: String, power_limit: String, } impl NvidiaSmiInfo { fn new() -> Option<Vec<NvidiaSmiInfo>> { let output = Command::new("nvidia-smi") .args([ "--query-gpu=name,pci.bus_id,driver_version,pstate,pcie.link.gen.max,pcie.link.gen.gpucurrent,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used,reset_status.reset_required,reset_status.drain_and_reset_recommended,compute_cap,ecc.errors.corrected.volatile.total,mig.mode.current,power.draw.instant,power.limit", "--format=csv" ]) .output() .ok()?; if !output.status.success() { return None; } let stdout = String::from_utf8(output.stdout).ok()?; let mut rdr = ReaderBuilder::new() .has_headers(true) .from_reader(stdout.as_bytes()); let mut infos = Vec::new(); for result in rdr.records() { let record = result.ok()?; infos.push(NvidiaSmiInfo { name: record[0].to_string(), pci_bus_id: record[1].to_string(), driver_version: record[2].to_string(), pstate: record[3].to_string(), pcie_link_gen_max: record[4].to_string(), pcie_link_gen_current: record[5].to_string(), temperature_gpu: record[6].to_string(), utilization_gpu: record[7].to_string(), utilization_memory: record[8].to_string(), memory_total: record[9].to_string(), memory_free: record[10].to_string(), memory_used: record[11].to_string(), reset_status_reset_required: record[12].to_string(), reset_status_drain_and_reset_recommended: record[13].to_string(), compute_cap: record[14].to_string(), ecc_errors_corrected_volatile_total: record[15].to_string(), mig_mode_current: record[16].to_string(), power_draw_instant: record[17].to_string(), power_limit: record[18].to_string(), }); } Some(infos) } } #[derive(Debug, Serialize, Clone)] struct XpuSmiInfo { device_id: usize, gpu_utilization: f32, gpu_power: f32, gpu_core_temperature: f32, gpu_memory_bandwidth_utilization: f32, } impl XpuSmiInfo { /// based on this https://github.com/intel/xpumanager/blob/master/doc/smi_user_guide.md#dump-the-device-statistics-in-csv-format fn new() -> Option<Vec<XpuSmiInfo>> { let output = Command::new("xpu-smi") .args([ "dump", "-d", "-1", "-m", "0,1,3,17", // Metrics IDs: GPU Utilization, GPU Power, GPU Core Temperature, GPU Memory Bandwidth Utilization "-n", "1", "-j", ]) .output() .ok()?; if !output.status.success() { return None; } let stdout = String::from_utf8(output.stdout).ok()?; let mut infos = Vec::new(); let json_data: serde_json::Value = match serde_json::from_str(&stdout) { Ok(data) => data, Err(_) => return None, }; if let Some(metrics_data) = json_data.as_array() { for entry in metrics_data { let device_id = entry["deviceId"].as_u64()? as usize; let gpu_utilization = entry["metrics"][0].as_f64()? as f32; let gpu_power = entry["metrics"][1].as_f64()? as f32; let gpu_core_temperature = entry["metrics"][2].as_f64()? as f32; let gpu_memory_bandwidth_utilization = entry["metrics"][3].as_f64()? 
as f32; infos.push(XpuSmiInfo { device_id, gpu_utilization, gpu_power, gpu_core_temperature, gpu_memory_bandwidth_utilization, }); } } Some(infos) } } #[derive(Debug, Serialize, Clone)] struct HpuSmiInfo { name: String, pci_bus_id: String, driver_version: String, temperature: String, utilization: String, memory_total: String, memory_free: String, memory_used: String, power_draw_instant: String, } impl HpuSmiInfo { fn new() -> Option<Vec<HpuSmiInfo>> { let output = Command::new("hl-smi") .args([ "--query-aip=name,bus_id,driver_version,temperature.aip,utilization.aip,memory.total,memory.free,memory.used,power.draw", "--format=csv" ]) .output() .ok()?; if !output.status.success() { return None; } let stdout = String::from_utf8(output.stdout).ok()?; let mut rdr = ReaderBuilder::new() .has_headers(true) .from_reader(stdout.as_bytes()); let mut infos = Vec::new(); for result in rdr.records() { let record = result.ok()?; infos.push(HpuSmiInfo { name: record[0].to_string(), pci_bus_id: record[1].to_string(), driver_version: record[2].to_string(), temperature: record[3].to_string(), utilization: record[4].to_string(), memory_total: record[5].to_string(), memory_free: record[6].to_string(), memory_used: record[7].to_string(), power_draw_instant: record[8].to_string(), }); } Some(infos) } } #[derive(Serialize, Debug, Clone)] pub struct SystemInfo { cpu_count: usize, cpu_type: String, total_memory: u64, architecture: String, platform: String, } impl SystemInfo { fn new() -> Self { let mut system = sysinfo::System::new_all(); system.refresh_all(); let cpu_count = system.cpus().len(); let cpu_type = system.cpus()[0].brand().to_string(); let total_memory = system.total_memory(); let architecture = std::env::consts::ARCH.to_string(); let platform = format!( "{}-{}-{}", std::env::consts::OS, std::env::consts::FAMILY, std::env::consts::ARCH ); Self { cpu_count, cpu_type, total_memory, architecture, platform, } } } impl Default for Env { fn default() -> Self { Self::new() } } impl Env { pub fn new() -> Self { Self { system_env: SystemInfo::new(), nvidia_info: NvidiaSmiInfo::new(), xpu_info: XpuSmiInfo::new(), hpu_info: HpuSmiInfo::new(), git_sha: option_env!("VERGEN_GIT_SHA").unwrap_or("N/A"), docker_label: option_env!("DOCKER_LABEL").unwrap_or("N/A"), } } pub fn is_hpu_device(&self) -> bool { self.hpu_info.is_some() } } pub fn is_container() -> io::Result<bool> { let path = Path::new("/proc/self/cgroup"); let file = File::open(path)?; let reader = io::BufReader::new(file); for line in reader.lines() { let line = line?; // Check for common container runtimes if line.contains("/docker/") || line.contains("/docker-") || line.contains("/kubepods/") || line.contains("/kubepods-") || line.contains("containerd") || line.contains("crio") || line.contains("podman") { return Ok(true); } } Ok(false) }
text-generation-inference/router/src/usage_stats.rs/0
{ "file_path": "text-generation-inference/router/src/usage_stats.rs", "repo_id": "text-generation-inference", "token_count": 6248 }
319
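Each probe in usage_stats.rs shells out to a vendor tool (nvidia-smi, xpu-smi, hl-smi), returns None when the tool is missing or fails, and parses CSV output with a real CSV reader rather than naive comma splitting. A rough Python equivalent of the NVIDIA probe, trimmed to a few of the queried fields:

import csv
import io
import subprocess

def nvidia_smi_info():
    # Query a subset of the fields the Rust code collects.
    try:
        out = subprocess.run(
            [
                "nvidia-smi",
                "--query-gpu=name,driver_version,memory.total,memory.used",
                "--format=csv",
            ],
            capture_output=True, text=True, check=True,
        ).stdout
    except (OSError, subprocess.CalledProcessError):
        return None  # mirror the Option<...> behavior: no tool or no GPUs, no data
    reader = csv.reader(io.StringIO(out))
    header = [h.strip() for h in next(reader)]
    return [
        dict(zip(header, (field.strip() for field in row)))
        for row in reader if row
    ]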
#!/usr/bin/env python3 import json import subprocess from typing import Dict, Union import toml # Special cases that have download URLs. SKIP = {"attention-kernels", "marlin-kernels", "moe-kernels"} def is_optional(info: Union[str, Dict[str, str]]) -> bool: return isinstance(info, dict) and "optional" in info and info["optional"] if __name__ == "__main__": with open("pyproject.toml") as f: pyproject = toml.load(f) nix_packages = json.loads( subprocess.run( ["nix", "develop", ".#server", "--command", "pip", "list", "--format=json"], stdout=subprocess.PIPE, ).stdout ) nix_packages = {pkg["name"]: pkg["version"] for pkg in nix_packages} packages = [] optional_packages = [] for package, info in pyproject["tool"]["poetry"]["dependencies"].items(): if package in nix_packages and package not in SKIP: if is_optional(info): optional_packages.append(f'"{package}@^{nix_packages[package]}"') else: packages.append(f'"{package}@^{nix_packages[package]}"') print(f"poetry add {' '.join(packages)}") print(f"poetry add --optional {' '.join(optional_packages)}")
text-generation-inference/server/bounds-from-nix.py/0
{ "file_path": "text-generation-inference/server/bounds-from-nix.py", "repo_id": "text-generation-inference", "token_count": 505 }
320
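The script above emits poetry add commands that pin each dependency to the version found in the Nix dev shell with a caret constraint. A tiny sketch of the string it builds, using a hypothetical pip list payload (the package version is illustrative, not taken from the repository):

import json

# Hypothetical `pip list --format=json` output; the version is made up.
sample = json.loads('[{"name": "safetensors", "version": "0.4.5"}]')
pins = {pkg["name"]: pkg["version"] for pkg in sample}
print(f'poetry add "safetensors@^{pins["safetensors"]}"')
# -> poetry add "safetensors@^0.4.5"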
// Adapted from turboderp exllama: https://github.com/turboderp/exllama #ifndef _tuning_h #define _tuning_h struct ExLlamaTuning { int matmul_recons_thd; bool matmul_fused_remap; bool matmul_no_half2; }; #endif
text-generation-inference/server/exllama_kernels/exllama_kernels/tuning.h/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/tuning.h", "repo_id": "text-generation-inference", "token_count": 106 }
321
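In upstream exllama, matmul_recons_thd acts as a batch-size cutoff: inputs at or above the threshold take a dequantize-then-GEMM reconstruction path, smaller ones take the fused quantized kernel, and matmul_no_half2 disables half2 vectorization. A hedged Python sketch of that dispatch rule; the semantics are inferred from the upstream project, not spelled out in this header.

def pick_matmul_path(num_rows: int, matmul_recons_thd: int) -> str:
    # Reconstruction (dequantize + cuBLAS GEMM) amortizes better for larger
    # batches; the fused int-quantized kernel wins for short decode inputs.
    if matmul_recons_thd and num_rows >= matmul_recons_thd:
        return "reconstruct"
    return "fused"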
#ifndef _qdq_5_cuh #define _qdq_5_cuh #include "qdq_util.cuh" #include "../../config.h" #if QMODE_5BIT == 1 // Permutation: // // v5555533 33311111 u4444422 22200000 (u, v lsb) // vbbbbb99 99977777 uaaaaa88 88866666 // vhhhhhff fffddddd ugggggee eeeccccc // vnnnnnll llljjjjj ummmmmkk kkkiiiii // vtttttrr rrrppppp usssssqq qqqooooo __forceinline__ __device__ void shuffle_5bit_32 ( uint32_t* q, int stride ) { uint32_t qa = q[0 * stride]; uint32_t qb = q[1 * stride]; uint32_t qc = q[2 * stride]; uint32_t qd = q[3 * stride]; uint32_t qe = q[4 * stride]; // qa: 66555554 44443333 32222211 11100000 // qb: ccccbbbb baaaaa99 99988888 77777666 // qc: jiiiiihh hhhggggg fffffeee eedddddc // qd: pppooooo nnnnnmmm mmlllllk kkkkjjjj // qe: vvvvvuuu uuttttts ssssrrrr rqqqqqpp uint32_t qf = qe >> 22; qe <<= 8; qe |= qd >> 24; qd <<= 6; qd |= qc >> 26; qc <<= 4; qc |= qb >> 28; qb <<= 2; qb |= qa >> 30; // qa: 555554 44443333 32222211 11100000 // qb: bbbbba aaaa9999 98888877 77766666 // qc: hhhhhg ggggffff feeeeedd dddccccc // qd: nnnnnm mmmmllll lkkkkkjj jjjiiiii // qe: ttttts ssssrrrr rqqqqqpp pppooooo // qf: vv vvvuuuuu uint32_t za = 0; uint32_t zb = 0; uint32_t zc = 0; uint32_t zd = 0; uint32_t ze = 0; for (int i = 0; i < 3; i++) { uint32_t t0 = qa & 0x1f; uint32_t t1 = (qa & 0x3e0) >> 5; qa >>= 10; za |= (t0 << (i * 5)); za |= (t1 << (i * 5 + 16)); } for (int i = 0; i < 3; i++) { uint32_t t0 = qb & 0x1f; uint32_t t1 = (qb & 0x3e0) >> 5; qb >>= 10; zb |= (t0 << (i * 5)); zb |= (t1 << (i * 5 + 16)); } for (int i = 0; i < 3; i++) { uint32_t t0 = qc & 0x1f; uint32_t t1 = (qc & 0x3e0) >> 5; qc >>= 10; zc |= (t0 << (i * 5)); zc |= (t1 << (i * 5 + 16)); } for (int i = 0; i < 3; i++) { uint32_t t0 = qd & 0x1f; uint32_t t1 = (qd & 0x3e0) >> 5; qd >>= 10; zd |= (t0 << (i * 5)); zd |= (t1 << (i * 5 + 16)); } for (int i = 0; i < 3; i++) { uint32_t t0 = qe & 0x1f; uint32_t t1 = (qe & 0x3e0) >> 5; qe >>= 10; ze |= (t0 << (i * 5)); ze |= (t1 << (i * 5 + 16)); } // za: 5555533 33311111 4444422 22200000 // zb: bbbbb99 99977777 aaaaa88 88866666 // zc: hhhhhff fffddddd gggggee eeeccccc // zd: nnnnnll llljjjjj mmmmmkk kkkiiiii // ze: tttttrr rrrppppp sssssqq qqqooooo // qf: vv vvvuuuuu za |= ((qf & 0x001) >> 0) << 15; zb |= ((qf & 0x002) >> 1) << 15; zc |= ((qf & 0x004) >> 2) << 15; zd |= ((qf & 0x008) >> 3) << 15; ze |= ((qf & 0x010) >> 4) << 15; za |= ((qf & 0x020) >> 5) << 31; zb |= ((qf & 0x040) >> 6) << 31; zc |= ((qf & 0x080) >> 7) << 31; zd |= ((qf & 0x100) >> 8) << 31; ze |= ((qf & 0x200) >> 9) << 31; // za: v5555533 33311111 u4444422 22200000 (u, v lsb) // zb: vbbbbb99 99977777 uaaaaa88 88866666 // zc: vhhhhhff fffddddd ugggggee eeeccccc // zd: vnnnnnll llljjjjj ummmmmkk kkkiiiii // ze: vtttttrr rrrppppp usssssqq qqqooooo q[0 * stride] = za; q[1 * stride] = zb; q[2 * stride] = zc; q[3 * stride] = zd; q[4 * stride] = ze; } __forceinline__ __device__ void dequant_5bit_32 ( const uint32_t q_0, const uint32_t q_1, const uint32_t q_2, const uint32_t q_3, const uint32_t q_4, half2 (&dq)[16], int stride ) { const uint32_t c0 = 0x64006400; const half y32_ = __float2half_rn(1.0f / 32.0f); const half2 y32 = __halves2half2(y32_, y32_); const half z1_ = __float2half_rn(-1024.0f - 16.0f); const half z32_ = __float2half_rn(-1024.0f / 32.0f - 16.0f); const half2 z1 = __halves2half2(z1_, z1_); const half2 z32 = __halves2half2(z32_, z32_); uint32_t qa = q_0; uint32_t qb = q_1; uint32_t qc = q_2; uint32_t qd = q_3; uint32_t qe = q_4; half2_uint32 q0 ((qa & 0x001f001f) | c0); // half2(q[ 0], q[ 1]) + 1024 half2_uint32 q1 ((qa & 
0x03e003e0) | c0); // half2(q[ 2], q[ 3]) * 32 + 1024 qa >>= 10; half2_uint32 q2 ((qa & 0x001f001f) | c0); // half2(q[ 4], q[ 5]) + 1024 qa >>= 5; qa &= 0x00010001; half2_uint32 q3 ((qb & 0x001f001f) | c0); // half2(q[ 6], q[ 7]) + 1024 half2_uint32 q4 ((qb & 0x03e003e0) | c0); // half2(q[ 8], q[ 9]) * 32 + 1024 qb >>= 10; half2_uint32 q5 ((qb & 0x001f001f) | c0); // half2(q[10], q[11]) + 1024 qb >>= 4; qb &= 0x00020002; half2_uint32 q6 ((qc & 0x001f001f) | c0); // half2(q[12], q[13]) + 1024 half2_uint32 q7 ((qc & 0x03e003e0) | c0); // half2(q[14], q[15]) * 32 + 1024 qc >>= 10; half2_uint32 q8 ((qc & 0x001f001f) | c0); // half2(q[16], q[17]) + 1024 qc >>= 3; qc &= 0x00040004; half2_uint32 q9 ((qd & 0x001f001f) | c0); // half2(q[18], q[19]) + 1024 half2_uint32 q10((qd & 0x03e003e0) | c0); // half2(q[20], q[21]) * 32 + 1024 qd >>= 10; half2_uint32 q11((qd & 0x001f001f) | c0); // half2(q[22], q[23]) + 1024 qd >>= 2; qd &= 0x00080008; half2_uint32 q12((qe & 0x001f001f) | c0); // half2(q[24], q[25]) + 1024 half2_uint32 q13((qe & 0x03e003e0) | c0); // half2(q[26], q[27]) * 32 + 1024 qe >>= 10; half2_uint32 q14((qe & 0x001f001f) | c0); // half2(q[28], q[29]) + 1024 qe >>= 1; qe &= 0x00100010; half2_uint32 q15((qa | qb | qc | qd | qe) | c0); dq[ 0] = __hadd2( q0.as_half2, z1); dq[ 1] = __hfma2( q1.as_half2, y32, z32); dq[ 2] = __hadd2( q2.as_half2, z1); dq[ 3] = __hadd2( q3.as_half2, z1); dq[ 4] = __hfma2( q4.as_half2, y32, z32); dq[ 5] = __hadd2( q5.as_half2, z1); dq[ 6] = __hadd2( q6.as_half2, z1); dq[ 7] = __hfma2( q7.as_half2, y32, z32); dq[ 8] = __hadd2( q8.as_half2, z1); dq[ 9] = __hadd2( q9.as_half2, z1); dq[10] = __hfma2(q10.as_half2, y32, z32); dq[11] = __hadd2(q11.as_half2, z1); dq[12] = __hadd2(q12.as_half2, z1); dq[13] = __hfma2(q13.as_half2, y32, z32); dq[14] = __hadd2(q14.as_half2, z1); dq[15] = __hadd2(q15.as_half2, z1); } #else __forceinline__ __device__ void shuffle_5bit_32 ( uint32_t* q, int stride ) { } __forceinline__ __device__ void dequant_5bit_32 ( const uint32_t q_0, const uint32_t q_1, const uint32_t q_2, const uint32_t q_3, const uint32_t q_4, half2 (&dq)[16], int stride ) { half dqh[32]; for (int i = 0; i < 6; i++) dqh[ i] = dq_ns(exb( q_0, i * 5 , 0x1f), 16); dqh[ 6 ] = dq_ns(exb(q_1, q_0, 30, 0x1f), 16); for (int i = 0; i < 5; i++) dqh[ 7 + i] = dq_ns(exb( q_1, i * 5 + 3, 0x1f), 16); dqh[12 ] = dq_ns(exb(q_2, q_1, 28, 0x1f), 16); for (int i = 0; i < 6; i++) dqh[13 + i] = dq_ns(exb( q_2, i * 5 + 1, 0x1f), 16); dqh[19 ] = dq_ns(exb(q_3, q_2, 31, 0x1f), 16); for (int i = 0; i < 5; i++) dqh[20 + i] = dq_ns(exb( q_3, i * 5 + 4, 0x1f), 16); dqh[25 ] = dq_ns(exb(q_4, q_3, 29, 0x1f), 16); for (int i = 0; i < 6; i++) dqh[26 + i] = dq_ns(exb( q_4, i * 5 + 2, 0x1f), 16); for (int i = 0; i < 16; i++) dq[i] = __halves2half2(dqh[i * 2], dqh[i * 2 + 1]); } #endif #endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_5.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/quant/qdq_5.cuh", "repo_id": "text-generation-inference", "token_count": 4272 }
322
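The QMODE_5BIT == 0 fallback above reads 32 weights straight out of five contiguous 32-bit words with exb-style bit slicing and subtracts the fixed zero point of 16 in dq_ns. A plain-Python reference for that contiguous (non-shuffled) layout, useful for checking a pack/unpack round trip:

def unpack_5bit_32(words):
    # words: five 32-bit ints holding 32 contiguous 5-bit fields, LSB first.
    bits = 0
    for i, w in enumerate(words):
        bits |= (w & 0xFFFFFFFF) << (32 * i)  # concatenate into 160 bits
    # Extract each 5-bit field and apply the dq_ns zero point of 16.
    return [((bits >> (5 * j)) & 0x1F) - 16 for j in range(32)]

def pack_5bit_32(values):
    bits = 0
    for j, v in enumerate(values):
        assert -16 <= v < 16  # 5-bit range after the zero point shift
        bits |= ((v + 16) & 0x1F) << (5 * j)
    return [(bits >> (32 * i)) & 0xFFFFFFFF for i in range(5)]

assert unpack_5bit_32(pack_5bit_32(list(range(-16, 16)))) == list(range(-16, 16))

The shuffled QMODE_5BIT == 1 path stores the same 160 bits in the permuted order documented by the bit diagrams above, so that dequant_5bit_32 can peel values off with cheap masks per word.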
import pytest import torch from copy import copy from transformers import AutoTokenizer from text_generation_server.pb import generate_pb2 from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch @pytest.fixture(scope="session") def default_causal_lm(): return CausalLM.fallback("gpt2") @pytest.fixture(scope="session") def gpt2_tokenizer(): tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left") tokenizer.pad_token_id = 50256 return tokenizer @pytest.fixture def default_pb_request(default_pb_parameters, default_pb_stop_parameters): return generate_pb2.Request( id=0, inputs="Test", input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]), prefill_logprobs=True, truncate=100, parameters=default_pb_parameters, stopping_parameters=default_pb_stop_parameters, ) @pytest.fixture def default_pb_batch(default_pb_request): return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1) @pytest.fixture def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer): return CausalLMBatch.from_pb( default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu") ) @pytest.fixture def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer): req_0 = copy(default_pb_request) req_0.id = 1 req_1 = default_pb_request req_1.id = 2 req_1.stopping_parameters.max_new_tokens = 5 batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2) return CausalLMBatch.from_pb( batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu") ) def test_batch_from_pb(default_pb_batch, default_causal_lm_batch): batch = default_causal_lm_batch assert batch.batch_id == default_pb_batch.id assert batch.requests == default_pb_batch.requests assert len(batch.input_ids) == default_pb_batch.size assert batch.input_ids[0][-1] == 14402 assert torch.all(batch.input_ids[0][:-1] == 50256) assert batch.attention_mask[0, 0] == 1 assert torch.all(batch.attention_mask[0, 1:] == 0) assert batch.past_key_values is None assert all( [ torch.equal(input_ids, all_input_ids[:, 0]) for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids) ] ) assert batch.input_lengths == [1] assert len(batch) == default_pb_batch.size assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch) assert batch.max_input_length == batch.input_lengths[0] def test_batch_concatenate_no_prefill(default_causal_lm_batch): with pytest.raises(ValueError): CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch]) def test_causal_lm_batch_type(default_causal_lm): assert default_causal_lm.batch_type == CausalLMBatch def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch): sequence_length = len(default_causal_lm_batch.all_input_ids[0]) generations, next_batch, _ = default_causal_lm.generate_token( default_causal_lm_batch ) assert len(generations) == len(next_batch) assert isinstance(next_batch, CausalLMBatch) assert len(next_batch.all_input_ids) == len(next_batch) assert len(next_batch.all_input_ids[0]) == sequence_length + 1 assert len(next_batch.attention_mask[0]) == 11 assert next_batch.all_input_ids[0][-1] == 13 assert next_batch.all_input_ids[0][-2] == 14402 assert torch.all(next_batch.all_input_ids[0][:-2] == 50256) assert torch.all(next_batch.attention_mask[0][0:2] == 1) assert torch.all(next_batch.attention_mask[0][2:] == 0) assert next_batch.input_ids.shape == (len(next_batch), 1) assert next_batch.input_ids[0, 0] == 13 assert next_batch.input_lengths == [2] assert next_batch.max_input_length == 
next_batch.input_lengths[0] assert next_batch.past_key_values is not None assert all( [p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values] ) assert all( [p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values] ) assert all([generation.generated_text is None for generation in generations]) assert all([len(generation.prefill_tokens) == 1 for generation in generations]) assert all( [ token_id.item() == 13 for generation in generations for token_id in generation.tokens.token_ids ] ) assert all( [ token_text == "." for generation in generations for token_text in generation.tokens.texts ] ) assert generations[0].request_id == 0 def test_causal_lm_generate_token_completion( default_causal_lm, default_causal_lm_batch ): next_batch = default_causal_lm_batch for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == ".java:784) at net.minecraft." assert generations[0].request_id == default_causal_lm_batch.requests[0].id assert ( generations[0].generated_text.generated_tokens == default_causal_lm_batch.stopping_criterias[0].max_new_tokens ) def test_causal_lm_generate_token_completion_multi( default_causal_lm, default_multi_requests_causal_lm_batch ): next_batch = default_multi_requests_causal_lm_batch for i in range( default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[1].generated_text.text == ".java:784)" assert ( generations[1].request_id == default_multi_requests_causal_lm_batch.requests[1].id ) assert ( generations[1].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens ) # Copy stopping_criterias before filtering stopping_criterias = ( default_multi_requests_causal_lm_batch.stopping_criterias.copy() ) next_batch = next_batch.filter([next_batch.requests[0].id]) for _ in range( stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == ".java:784) at net.minecraft." 
assert ( generations[0].request_id == default_multi_requests_causal_lm_batch.requests[0].id ) assert ( generations[0].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens ) def test_batch_concatenate( default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch ): next_batch_0 = default_causal_lm_batch _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0) _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0) next_batch_1 = default_multi_requests_causal_lm_batch _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1) # Clone past_key_values before concatenating to compare after, # because they are removed from the concatenated batches next_batch_0_past_key_values = [ (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values ] next_batch_1_past_key_values = [ (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values ] next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1]) assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0]) assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0]) assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1]) assert torch.all( next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1 ) assert torch.all( next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1 ) assert torch.all(next_batch.attention_mask[1:, 3:] == 0) assert next_batch.batch_id == 0 assert next_batch.input_ids[0, 0] == 12355 assert torch.all(next_batch.input_ids[1:] == 13) assert next_batch.input_lengths == [3, 2, 2] assert next_batch.max_input_length == 3 assert next_batch.requests[0] == next_batch_0.requests[0] assert next_batch.requests[1:] == list(next_batch_1.requests) assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0] assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0] assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias assert next_batch.past_key_values is not None assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values]) assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values]) for i, past in enumerate(next_batch.past_key_values): assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0]) assert torch.equal( next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :] ) assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0]) assert torch.equal( next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :] ) for _ in range( default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 3 assert generations[2].generated_text.text == ".java:784)" assert ( generations[2].request_id == default_multi_requests_causal_lm_batch.requests[1].id ) assert ( generations[2].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens ) next_batch = next_batch.filter( [next_batch.requests[0].id, next_batch.requests[1].id] ) for _ in range( default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 
default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is not None assert len(generations) == 2 assert generations[0].generated_text.text == ".java:784) at net.minecraft." assert generations[0].request_id == default_causal_lm_batch.requests[0].id assert ( generations[0].generated_text.generated_tokens == default_causal_lm_batch.stopping_criterias[0].max_new_tokens ) next_batch = next_batch.filter([next_batch.requests[1].id]) for _ in range( default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens - default_causal_lm_batch.stopping_criterias[0].max_new_tokens - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 4 ): generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert len(generations) == len(next_batch) generations, next_batch, _ = default_causal_lm.generate_token(next_batch) assert next_batch is None assert len(generations) == 1 assert generations[0].generated_text.text == ".java:784) at net.minecraft." assert ( generations[0].request_id == default_multi_requests_causal_lm_batch.requests[0].id ) assert ( generations[0].generated_text.generated_tokens == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens )
text-generation-inference/server/tests/models/test_causal_lm.py/0
{ "file_path": "text-generation-inference/server/tests/models/test_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 5390 }
323
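The concatenation test checks that per-layer past_key_values of shape (batch, heads, seq, head_dim) are merged with left padding, so a shorter sequence only occupies the rightmost positions of the merged cache. A toy torch sketch of that merge for a single layer, using the GPT-2 head shapes the test asserts (12 heads, 64-dim):

import torch

# One layer's keys for two batches: batch A has seen 2 tokens, batch B 1 token.
k_a = torch.randn(1, 12, 2, 64)
k_b = torch.randn(2, 12, 1, 64)

max_len = max(k_a.shape[2], k_b.shape[2])
merged = torch.zeros(k_a.shape[0] + k_b.shape[0], 12, max_len, 64)
merged[: k_a.shape[0], :, -k_a.shape[2]:] = k_a  # right-align: left padding
merged[k_a.shape[0]:, :, -k_b.shape[2]:] = k_b

# Same slicing the test uses to compare the merged cache to the originals.
assert torch.equal(merged[0, :, -2:], k_a[0])
assert torch.equal(merged[1:, :, -1:], k_b)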
from dataclasses import dataclass import bitsandbytes as bnb import torch from bitsandbytes.nn import Int8Params, Params4bit from text_generation_server.utils.weights import UnquantizedWeight @dataclass class BNBWeight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear8bitLt(self.weight, bias, has_fp16_weights=False, threshold=6.0) class Linear8bitLt(torch.nn.Module): def __init__( self, weight, bias, has_fp16_weights=True, memory_efficient_backward=False, threshold=0.0, index=None, ): super().__init__() assert ( not memory_efficient_backward ), "memory_efficient_backward is no longer required and the argument is deprecated in 0.37.0 and will be removed in 0.39.0" self.state = bnb.MatmulLtState() self.index = index # Necessary for stacked layers self.state.threshold = threshold self.state.has_fp16_weights = has_fp16_weights self.state.memory_efficient_backward = memory_efficient_backward if threshold > 0.0 and not has_fp16_weights: self.state.use_pool = True self.weight = Int8Params( weight.data, has_fp16_weights=has_fp16_weights, requires_grad=has_fp16_weights, ) self.weight.cuda(weight.device) self.bias = bias def init_8bit_state(self): self.state.CB = self.weight.CB self.state.SCB = self.weight.SCB self.weight.CB = None self.weight.SCB = None def forward(self, x: torch.Tensor): self.state.is_training = self.training if self.weight.CB is not None: self.init_8bit_state() # weights are cast automatically as Int8Params, but the bias has to be cast manually if self.bias is not None and self.bias.dtype != x.dtype: self.bias.data = self.bias.data.to(x.dtype) out = bnb.matmul(x, self.weight, bias=self.bias, state=self.state) if not self.state.has_fp16_weights: if self.state.CB is not None and self.state.CxB is not None: # we converted 8-bit row major to turing/ampere format in the first inference pass # we no longer need the row-major weight del self.state.CB self.weight.data = self.state.CxB return out @dataclass class BNBFP4Weight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear4bit(self.weight, bias, quant_type="fp4") @dataclass class BNBNF4Weight(UnquantizedWeight): weight: torch.Tensor def get_linear(self, bias: torch.Tensor): return Linear4bit(self.weight, bias, quant_type="nf4") class Linear4bit(torch.nn.Module): def __init__(self, weight, bias, quant_type): super().__init__() self.weight = Params4bit( weight.data, requires_grad=False, compress_statistics=True, quant_type=quant_type, ) self.compute_dtype = None self.weight.cuda(weight.device) self.bias = bias def forward(self, x: torch.Tensor): # weights are cast automatically as Params4bit, but the bias has to be cast manually if self.bias is not None and self.bias.dtype != x.dtype: self.bias.data = self.bias.data.to(x.dtype) if getattr(self.weight, "quant_state", None) is None: print( "4-bit quantization state not initialized. Please call .cuda() or .to(device) on the Linear4bit layer first." ) inp_dtype = x.dtype if self.compute_dtype is not None: x = x.to(self.compute_dtype) bias = None if self.bias is None else self.bias.to(self.compute_dtype) out = bnb.matmul_4bit( x, self.weight.t(), bias=bias, quant_state=self.weight.quant_state ) out = out.to(inp_dtype) return out
text-generation-inference/server/text_generation_server/layers/bnb.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/bnb.py", "repo_id": "text-generation-inference", "token_count": 1825 }
324
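Instantiating the wrappers above only requires a half-precision weight on a CUDA device; Int8Params performs the actual int8 quantization when .cuda() is called in the constructor. A hedged usage sketch (it needs a GPU with bitsandbytes installed, and the 4096 sizes are arbitrary):

import torch
# Assumes the module above is importable under the file_path shown in its entry.
from text_generation_server.layers.bnb import Linear8bitLt

weight = torch.randn(4096, 4096, dtype=torch.float16, device="cuda")
bias = torch.zeros(4096, dtype=torch.float16, device="cuda")

# threshold=6.0 routes outlier features through fp16 (the LLM.int8() scheme).
layer = Linear8bitLt(weight, bias, has_fp16_weights=False, threshold=6.0)

x = torch.randn(1, 4096, dtype=torch.float16, device="cuda")
y = layer(x)  # int8 matmul with fp16 outlier decomposition
print(y.shape)  # torch.Size([1, 4096])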
import time import torch.nn as nn import math import json import os import torch import transformers from texttable import Texttable from transformers import AutoModelForCausalLM, AutoConfig, AutoTokenizer from huggingface_hub import HfApi from accelerate import init_empty_weights from text_generation_server.utils import initialize_torch_distributed, Weights from text_generation_server.utils.hub import weight_files from text_generation_server.layers.gptq import QuantLinear from loguru import logger from typing import Optional from text_generation_server.layers.gptq.utils import torch_snr_error from text_generation_server.utils.weights import DefaultWeightsLoader, UnquantizedWeight DEV = torch.device("cuda:0") class Quantizer(nn.Module): def __init__(self, shape=1): super(Quantizer, self).__init__() self.register_buffer("maxq", torch.tensor(0)) self.register_buffer("scale", torch.zeros(shape)) self.register_buffer("zero", torch.zeros(shape)) def configure( self, bits, perchannel=False, sym=True, mse=False, norm=2.4, grid=100, maxshrink=0.8, trits=False, ): self.maxq = torch.tensor(2**bits - 1) self.perchannel = perchannel self.sym = sym self.mse = mse self.norm = norm self.grid = grid self.maxshrink = maxshrink if trits: self.maxq = torch.tensor(-1) self.scale = torch.zeros_like(self.scale) def _quantize(self, x, scale, zero, maxq): if maxq < 0: return (x > scale / 2).float() * scale + (x < zero / 2).float() * zero q = torch.clamp(torch.round(x / scale) + zero, 0, maxq) return scale * (q - zero) def find_params(self, x, weight=False): dev = x.device self.maxq = self.maxq.to(dev) shape = x.shape if self.perchannel: if weight: x = x.flatten(1) else: if len(shape) == 4: x = x.permute([1, 0, 2, 3]) x = x.flatten(1) if len(shape) == 3: x = x.reshape((-1, shape[-1])).t() if len(shape) == 2: x = x.t() else: x = x.flatten().unsqueeze(0) tmp = torch.zeros(x.shape[0], device=dev) xmin = torch.minimum(x.min(1)[0], tmp) xmax = torch.maximum(x.max(1)[0], tmp) if self.sym: xmax = torch.maximum(torch.abs(xmin), xmax) tmp = xmin < 0 if torch.any(tmp): xmin[tmp] = -xmax[tmp] tmp = (xmin == 0) & (xmax == 0) xmin[tmp] = -1 xmax[tmp] = +1 if self.maxq < 0: self.scale = xmax self.zero = xmin else: self.scale = (xmax - xmin) / self.maxq if self.sym: self.zero = torch.full_like(self.scale, (self.maxq + 1) / 2) else: self.zero = torch.round(-xmin / self.scale) if self.mse: best = torch.full([x.shape[0]], float("inf"), device=dev) for i in range(int(self.maxshrink * self.grid)): p = 1 - i / self.grid xmin1 = p * xmin xmax1 = p * xmax scale1 = (xmax1 - xmin1) / self.maxq zero1 = torch.round(-xmin1 / scale1) if not self.sym else self.zero q = self._quantize( x, scale1.unsqueeze(1), zero1.unsqueeze(1), self.maxq ) q -= x q.abs_() q.pow_(self.norm) err = torch.sum(q, 1) tmp = err < best if torch.any(tmp): best[tmp] = err[tmp] self.scale[tmp] = scale1[tmp] self.zero[tmp] = zero1[tmp] if not self.perchannel: if weight: tmp = shape[0] else: tmp = shape[1] if len(shape) != 3 else shape[2] self.scale = self.scale.repeat(tmp) self.zero = self.zero.repeat(tmp) if weight: shape = [-1] + [1] * (len(shape) - 1) self.scale = self.scale.reshape(shape) self.zero = self.zero.reshape(shape) return if len(shape) == 4: self.scale = self.scale.reshape((1, -1, 1, 1)) self.zero = self.zero.reshape((1, -1, 1, 1)) if len(shape) == 3: self.scale = self.scale.reshape((1, 1, -1)) self.zero = self.zero.reshape((1, 1, -1)) if len(shape) == 2: self.scale = self.scale.unsqueeze(0) self.zero = self.zero.unsqueeze(0) def quantize(self, x): if 
self.ready(): return self._quantize(x, self.scale, self.zero, self.maxq) return x def enabled(self): return self.maxq > 0 def ready(self): return torch.all(self.scale != 0) class GPTQ: def __init__(self, layer, observe=False): self.layer = layer self.dev = self.layer.weight.device W = layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() self.rows = W.shape[0] self.columns = W.shape[1] self.H = torch.zeros((self.columns, self.columns), device=self.dev) self.nsamples = 0 self.quantizer = Quantizer() self.observe = observe def add_batch(self, inp, out): # Hessian H = 2 X XT + λ I if self.observe: self.inp1 = inp self.out1 = out else: self.inp1 = None self.out1 = None if len(inp.shape) == 2: inp = inp.unsqueeze(0) tmp = inp.shape[0] if isinstance(self.layer, nn.Linear) or isinstance( self.layer, transformers.Conv1D ): if len(inp.shape) == 3: inp = inp.reshape((-1, inp.shape[-1])) inp = inp.t() if isinstance(self.layer, nn.Conv2d): unfold = nn.Unfold( self.layer.kernel_size, dilation=self.layer.dilation, padding=self.layer.padding, stride=self.layer.stride, ) inp = unfold(inp) inp = inp.permute([1, 0, 2]) inp = inp.flatten(1) self.H *= self.nsamples / (self.nsamples + tmp) self.nsamples += tmp # inp = inp.float() inp = math.sqrt(2 / self.nsamples) * inp.float() # self.H += 2 / self.nsamples * inp.matmul(inp.t()) self.H += inp.matmul(inp.t()) def print_loss(self, name, q_weight, weight_error, timecost): table = Texttable() length = 28 name = ( (name + " " * (length - len(name))) if len(name) <= length else name[:length] ) table.header(["name", "weight_error", "fp_inp_SNR", "q_inp_SNR", "time"]) # assign weight self.layer.weight.data = q_weight.reshape(self.layer.weight.shape).to( self.layer.weight.data.dtype ) if self.inp1 is not None: # quantize input to int8 quantizer = Quantizer() quantizer.configure(8, perchannel=False, sym=True, mse=False) quantizer.find_params(self.inp1) q_in = quantizer.quantize(self.inp1).type(torch.float16) q_out = self.layer(q_in) # get kinds of SNR q_SNR = torch_snr_error(q_out, self.out1).item() fp_SNR = torch_snr_error(self.layer(self.inp1), self.out1).item() else: q_SNR = "-" fp_SNR = "-" table.add_row([name, weight_error, fp_SNR, q_SNR, timecost]) print(table.draw().split("\n")[-2]) def fasterquant( self, blocksize=128, percdamp=0.01, groupsize=-1, act_order=False, name="" ): self.layer.to(self.dev) W = self.layer.weight.data.clone() if isinstance(self.layer, nn.Conv2d): W = W.flatten(1) if isinstance(self.layer, transformers.Conv1D): W = W.t() W = W.float() tick = time.time() if not self.quantizer.ready(): self.quantizer.find_params(W, weight=True) H = self.H if not self.observe: del self.H dead = torch.diag(H) == 0 H[dead, dead] = 1 W[:, dead] = 0 if act_order: perm = torch.argsort(torch.diag(H), descending=True) W = W[:, perm] H = H[perm][:, perm] Losses = torch.zeros_like(W) Q = torch.zeros_like(W) damp = percdamp * torch.mean(torch.diag(H)) diag = torch.arange(self.columns, device=self.dev) H[diag, diag] += damp H = torch.linalg.cholesky(H) H = torch.cholesky_inverse(H) try: H = torch.linalg.cholesky(H, upper=True) except Exception: # Addition because Falcon fails on h_to_4h H = torch.linalg.cholesky( H + 1e-5 * torch.eye(H.shape[0]).to(H.device), upper=True ) Hinv = H g_idx = [] scale = [] zero = [] now_idx = 1 for i1 in range(0, self.columns, blocksize): i2 = min(i1 + blocksize, self.columns) count = i2 - i1 W1 = W[:, i1:i2].clone() Q1 = torch.zeros_like(W1) Err1 = 
torch.zeros_like(W1) Losses1 = torch.zeros_like(W1) Hinv1 = Hinv[i1:i2, i1:i2] for i in range(count): w = W1[:, i] d = Hinv1[i, i] if groupsize != -1: if (i1 + i) % groupsize == 0: self.quantizer.find_params( W[:, (i1 + i) : (i1 + i + groupsize)], weight=True ) if ((i1 + i) // groupsize) - now_idx == -1: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) now_idx += 1 q = self.quantizer.quantize(w.unsqueeze(1)).flatten() Q1[:, i] = q Losses1[:, i] = (w - q) ** 2 / d**2 err1 = (w - q) / d W1[:, i:] -= err1.unsqueeze(1).matmul(Hinv1[i, i:].unsqueeze(0)) Err1[:, i] = err1 Q[:, i1:i2] = Q1 Losses[:, i1:i2] = Losses1 / 2 W[:, i2:] -= Err1.matmul(Hinv[i1:i2, i2:]) torch.cuda.synchronize() error = torch.sum(Losses).item() groupsize = groupsize if groupsize != -1 else self.columns g_idx = [i // groupsize for i in range(self.columns)] g_idx = torch.tensor(g_idx, dtype=torch.int32, device=Q.device) if act_order: invperm = torch.argsort(perm) Q = Q[:, invperm] g_idx = g_idx[invperm] if isinstance(self.layer, transformers.Conv1D): Q = Q.t() self.print_loss( name=name, q_weight=Q, weight_error=error, timecost=(time.time() - tick) ) if scale == []: scale.append(self.quantizer.scale) zero.append(self.quantizer.zero) scale = torch.cat(scale, dim=1) zero = torch.cat(zero, dim=1) return scale, zero, g_idx, error def free(self): self.inp1 = None self.out1 = None self.H = None self.Losses = None self.Trace = None torch.cuda.empty_cache() def get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("wikitext", "wikitext-2-raw-v1", split="train") testdata = load_dataset("wikitext", "wikitext-2-raw-v1", split="test") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer("\n\n".join(traindata["text"]), return_tensors="pt") testenc = tokenizer("\n\n".join(testdata["text"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("ptb_text_only", "penn_treebank", split="train") valdata = load_dataset("ptb_text_only", "penn_treebank", split="validation") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer("\n\n".join(traindata["sentence"]), return_tensors="pt") testenc = tokenizer("\n\n".join(valdata["sentence"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_c4(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset( "allenai/c4", "allenai--c4", data_files={"train": "en/c4-train.00000-of-01024.json.gz"}, split="train", use_auth_token=False, ) 
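    # A single C4 shard suffices for calibration: the sampling loops below draw nsamples random seqlen-token windows from individual documents.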
valdata = load_dataset( "allenai/c4", "allenai--c4", data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"}, split="validation", use_auth_token=False, ) try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]["text"], return_tensors="pt") if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) import random random.seed(0) valenc = [] for _ in range(256): while True: i = random.randint(0, len(valdata) - 1) tmp = tokenizer(valdata[i]["text"], return_tensors="pt") if tmp.input_ids.shape[1] >= seqlen: break i = random.randint(0, tmp.input_ids.shape[1] - seqlen - 1) j = i + seqlen valenc.append(tmp.input_ids[:, i:j]) valenc = torch.hstack(valenc) class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return trainloader, valenc def get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset("ptb_text_only", "penn_treebank", split="train") testdata = load_dataset("ptb_text_only", "penn_treebank", split="test") try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) trainenc = tokenizer(" ".join(traindata["sentence"]), return_tensors="pt") testenc = tokenizer(" ".join(testdata["sentence"]), return_tensors="pt") import random random.seed(seed) trainloader = [] for _ in range(nsamples): i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) return trainloader, testenc def get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code): from datasets import load_dataset traindata = load_dataset( "allenai/c4", "allenai--c4", data_files={"train": "en/c4-train.00000-of-01024.json.gz"}, split="train", ) valdata = load_dataset( "allenai/c4", "allenai--c4", data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"}, split="validation", ) try: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=False, trust_remote_code=trust_remote_code ) except Exception: tokenizer = AutoTokenizer.from_pretrained( model_id, use_fast=True, trust_remote_code=trust_remote_code ) import random random.seed(seed) trainloader = [] for _ in range(nsamples): while True: i = random.randint(0, len(traindata) - 1) trainenc = tokenizer(traindata[i]["text"], return_tensors="pt") if trainenc.input_ids.shape[1] >= seqlen: break i = random.randint(0, trainenc.input_ids.shape[1] - seqlen - 1) j = i + seqlen inp = trainenc.input_ids[:, i:j] tar = inp.clone() tar[:, :-1] = -100 trainloader.append((inp, tar)) valenc = tokenizer(" ".join(valdata[:1100]["text"]), return_tensors="pt") valenc = valenc.input_ids[:, : (256 * seqlen)] class TokenizerWrapper: def __init__(self, input_ids): self.input_ids = input_ids valenc = TokenizerWrapper(valenc) return trainloader, valenc def 
get_loaders( name, nsamples=128, seed=0, seqlen=2048, model_id="", trust_remote_code=False ): if "wikitext2" in name: return get_wikitext2(nsamples, seed, seqlen, model_id, trust_remote_code) if "ptb" in name: if "new" in name: return get_ptb_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_ptb(nsamples, seed, seqlen, model_id, trust_remote_code) if "c4" in name: if "new" in name: return get_c4_new(nsamples, seed, seqlen, model_id, trust_remote_code) return get_c4(nsamples, seed, seqlen, model_id, trust_remote_code) def find_layers(module, layers=(nn.Conv2d, nn.Linear), name=""): # Skip the last lm_head linear. # The isinstance check is needed because Falcon layers inherit from Linear. if isinstance(module, layers) and "lm_head" not in name: return {name: module} res = {} for name1, child in module.named_children(): res.update( find_layers( child, layers=layers, name=name + "." + name1 if name != "" else name1 ) ) return res @torch.no_grad() def sequential( model, dataloader, dev, nsamples, bits, groupsize, *, hooks, percdamp=0.01, sym: bool = False, act_order: bool = False, ): print("Starting ...") use_cache = model.config.use_cache model.config.use_cache = False try: layers = model.model.layers prefix = "model.layers" except Exception: layers = model.transformer.h prefix = "transformer.h" dtype = next(iter(model.parameters())).dtype inps = torch.zeros( (nsamples, model.seqlen, model.config.hidden_size), dtype=dtype, device=dev ) cache = {"i": 0} extra = {} class Catcher(nn.Module): def __init__(self, module): super().__init__() self.module = module def forward(self, inp, **kwargs): inps[cache["i"]] = inp cache["i"] += 1 extra.update(kwargs.copy()) raise ValueError layers[0] = Catcher(layers[0]) for batch in dataloader: try: model(batch[0].cuda()) except ValueError: pass layers[0] = layers[0].module # layers[0] = layers[0].cpu() # model.model.embed_tokens = model.model.embed_tokens.cpu() # model.model.norm = model.model.norm.cpu() torch.cuda.empty_cache() for hook in hooks: hook.remove() outs = torch.zeros_like(inps) extra = { k: v.to(dev) if isinstance(v, torch.Tensor) else v for k, v in extra.items() } print("Ready.") quantizers = {} for i in range(len(layers)): print(f"Quantizing layer {i+1}/{len(layers)}..") print("+------------------+--------------+------------+-----------+-------+") print("| name | weight_error | fp_inp_SNR | q_inp_SNR | time |") print("+==================+==============+============+===========+=======+") layer = layers[i] layer.load() full = find_layers(layer) sequential = [list(full.keys())] for names in sequential: subset = {n: full[n] for n in names} gptq = {} for name in subset: gptq[name] = GPTQ(subset[name]) gptq[name].quantizer.configure( bits, perchannel=True, sym=sym, mse=False ) def add_batch(name): nonlocal gptq def tmp(_, inp, out): gptq[name].add_batch(inp[0].data, out.data) return tmp handles = [] for name in subset: handles.append(subset[name].register_forward_hook(add_batch(name))) for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] for h in handles: h.remove() for name in subset: scale, zero, g_idx, error = gptq[name].fasterquant( percdamp=percdamp, groupsize=groupsize, act_order=act_order, name=name, ) quantizers[f"{prefix}.{i}.{name}"] = ( gptq[name].quantizer.cpu(), scale.cpu(), zero.cpu(), g_idx.cpu(), bits, groupsize, ) gptq[name].free() for j in range(nsamples): outs[j] = layer(inps[j].unsqueeze(0), **extra)[0] layer.unload() del layer del gptq torch.cuda.empty_cache() inps, outs = outs, inps 
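        # inps and outs were just swapped above, so this layer's (quantized) outputs become the calibration inputs for the next layer.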
print("+------------------+--------------+------------+-----------+-------+") print("\n") model.config.use_cache = use_cache return quantizers def make_quant_linear(module, names, bits, groupsize, name=""): if isinstance(module, QuantLinear): return for attr in dir(module): tmp = getattr(module, attr) name1 = name + "." + attr if name != "" else attr if name1 in names: delattr(module, attr) setattr( module, attr, QuantLinear.new( bits, groupsize, tmp.in_features, tmp.out_features, tmp.bias is not None, ), ) for name1, child in module.named_children(): make_quant_linear( child, names, bits, groupsize, name + "." + name1 if name != "" else name1 ) # TODO: perform packing on GPU def pack(model, quantizers, bits, groupsize): layers = find_layers(model) layers = {n: layers[n] for n in quantizers} make_quant_linear(model, quantizers, bits, groupsize) qlayers = find_layers(model, (QuantLinear,)) print("Packing ...") for name in qlayers: print(name) quantizers[name], scale, zero, g_idx, _, _ = quantizers[name] qlayers[name].pack(layers[name], scale, zero, g_idx) print("Done.") return model def setdeepattr(module, full_name, tensor): current = module tokens = full_name.split(".") for token in tokens[:-1]: current = getattr(current, token) setattr(current, tokens[-1], tensor) def getdeepattr(module, full_name): current = module tokens = full_name.split(".") for token in tokens: current = getattr(current, token) return current def load_weights_pre_hook(module_name, weights, recursive=False): def inner(module, args): print(f"Pre hook {module_name}") local_params = {} for k, v in module.named_parameters(): if not recursive and k.count(".") != 1: continue local_params[k] = v for k, v in module.named_buffers(): if not recursive and k.count(".") != 1: continue local_params[k] = v for local_param in local_params: current_tensor = getdeepattr(module, local_param) if current_tensor.device == torch.device("meta"): # print(f"Loading {local_param}") if module_name: tensor_name = f"{module_name}.{local_param}" else: tensor_name = local_param tensor = weights.get_tensor(tensor_name) setdeepattr(module, local_param, nn.Parameter(tensor)) else: tensor = current_tensor.to(device=torch.device("cuda:0")) if current_tensor.requires_grad: tensor = nn.Parameter(tensor) setdeepattr(module, local_param, tensor) return inner def load_weights_post_hook(module_name, weights, recursive=False): def inner(module, args, output): print(f"Post hook {module_name}") local_params = {} for k, v in module.named_parameters(): if not recursive and k.count(".") != 1: continue local_params[k] = v for k, v in module.named_buffers(): if not recursive and k.count(".") != 1: continue local_params[k] = v for local_param in local_params: # print(f"Unloading {local_param}") current_tensor = getdeepattr(module, local_param) setdeepattr( module, local_param, nn.Parameter(current_tensor.to(device=torch.device("cpu"))), ) return output return inner def quantize( model_id: str, bits: int, groupsize: int, output_dir: str, revision: str, trust_remote_code: bool, upload_to_model_id: Optional[str], percdamp: float, act_order: bool, sym: bool, ): print("loading model") config = AutoConfig.from_pretrained( model_id, trust_remote_code=trust_remote_code, ) with init_empty_weights(): model = AutoModelForCausalLM.from_config( config, torch_dtype=torch.float16, trust_remote_code=trust_remote_code ) model = model.eval() print("LOADED model") files = weight_files(model_id, revision, extension=".safetensors") process_group, _, _ = initialize_torch_distributed() 
weights = Weights( files, device=torch.device("cuda:0"), dtype=torch.float16, process_group=process_group, aliases={"embed_tokens.weight": ["lm_head.weight"]}, weights_loader=DefaultWeightsLoader(UnquantizedWeight), ) hooks = [] for name, module in model.named_modules(): def load(module, name): def _load(): load_weights_pre_hook(name, weights, recursive=True)(module, None) return _load def unload(module, name): def _unload(): load_weights_post_hook(name, weights, recursive=True)( module, None, None ) return _unload module.load = load(module, name) module.unload = unload(module, name) hooks.append( module.register_forward_pre_hook(load_weights_pre_hook(name, weights)) ) hooks.append( module.register_forward_hook(load_weights_post_hook(name, weights)) ) model.seqlen = 2048 dataset = "wikitext2" nsamples = 128 seed = None dataloader, testloader = get_loaders( dataset, nsamples=nsamples, seed=seed, model_id=model_id, seqlen=model.seqlen, trust_remote_code=trust_remote_code, ) tick = time.time() quantizers = sequential( model, dataloader, DEV, nsamples, bits, groupsize, percdamp=percdamp, act_order=act_order, hooks=hooks, sym=sym, ) print(time.time() - tick) pack(model, quantizers, bits, groupsize) from safetensors.torch import save_file from huggingface_hub import split_torch_state_dict_into_shards state_dict = model.state_dict() state_dict = {k: v.cpu().contiguous() for k, v in state_dict.items()} max_shard_size = "10GB" state_dict_split = split_torch_state_dict_into_shards( state_dict, filename_pattern="model.safetensors", max_shard_size=max_shard_size, ) index = None if state_dict_split.is_sharded: index = { "metadata": state_dict_split.metadata, "weight_map": state_dict_split.tensor_to_filename, } shards = state_dict_split.filename_to_tensors os.makedirs(output_dir, exist_ok=True) for shard_file, shard in shards.items(): save_file( shard, os.path.join(output_dir, shard_file), metadata={ "format": "pt", "quantized": "gptq", "origin": "text-generation-inference", }, ) if index is None: path_to_weights = os.path.join(output_dir, "model.safetensors") logger.info(f"Model weights saved in {path_to_weights}") else: save_index_file = "model.safetensors.index.json" save_index_file = os.path.join(output_dir, save_index_file) with open(save_index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) logger.info( f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be " f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the " f"index located at {save_index_file}." ) config = AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code) config.quantization_config = { "bits": bits, "group_size": groupsize, "damp_percent": percdamp, "desc_act": act_order, "static_groups": False, "sym": sym, "quant_method": "gptq", } config.save_pretrained(output_dir) logger.info("Saved config") logger.info("Saving tokenizer") tokenizer = AutoTokenizer.from_pretrained( model_id, trust_remote_code=trust_remote_code ) tokenizer.save_pretrained(output_dir) logger.info("Saved tokenizer") if upload_to_model_id: api = HfApi() api.upload_folder( folder_path=output_dir, repo_id=upload_to_model_id, repo_type="model" )
text-generation-inference/server/text_generation_server/layers/gptq/quantize.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/gptq/quantize.py", "repo_id": "text-generation-inference", "token_count": 16305 }
325
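The `Quantizer` above is plain per-channel round-to-nearest; GPTQ's contribution is the Hessian-guided error propagation in `GPTQ.fasterquant`. For reference, a minimal standalone sketch of that round-to-nearest primitive (toy tensors, asymmetric 4-bit; names here are illustrative and not part of the repo):

import torch

# Per-output-channel asymmetric quantization, the primitive that
# Quantizer.find_params / Quantizer._quantize implement above.
bits = 4
maxq = 2**bits - 1

W = torch.randn(8, 64)  # toy weight matrix: one row per output channel
xmin = torch.clamp(W.min(dim=1).values, max=0.0)  # force the grid to contain 0
xmax = torch.clamp(W.max(dim=1).values, min=0.0)
degenerate = (xmin == 0) & (xmax == 0)  # all-zero channels still need a valid scale
xmin[degenerate], xmax[degenerate] = -1.0, 1.0

scale = (xmax - xmin) / maxq
zero = torch.round(-xmin / scale)  # integer grid point that represents 0.0

q = torch.clamp(torch.round(W / scale[:, None]) + zero[:, None], 0, maxq)
W_hat = scale[:, None] * (q - zero[:, None])  # dequantized weights
print((W - W_hat).abs().max().item())  # worst-case error is about scale / 2 per channel

GPTQ improves on this by quantizing one column at a time and folding each column's rounding error into the not-yet-quantized columns through the inverse Hessian, which is what the `W1[:, i:] -= err1...` update inside `fasterquant` does.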
from dataclasses import dataclass from typing import Callable, List, Optional import torch import torch.nn as nn from text_generation_server.layers import moe from text_generation_server.utils.import_utils import SYSTEM from text_generation_server.utils.kernels import load_kernel from text_generation_server.utils.weights import Weights from text_generation_server.layers.marlin.gptq import ( GPTQMarlinWeight, GPTQMarlinWeightsLoader, ) if SYSTEM == "cuda": moe_kernels = load_kernel(module="moe", repo_id="kernels-community/moe") else: moe_kernels = None try: major, _minor = torch.cuda.get_device_capability() has_sm_8_0 = major >= 8 except Exception: has_sm_8_0 = False def can_use_marlin_moe_gemm( *, quant_method: str, quantize: str, sym: bool, ): return ( SYSTEM == "cuda" and moe is not None and has_sm_8_0 and quantize in {"awq", "gptq"} and quant_method in {"awq", "gptq"} # We only support asymmetric quantization for AWQ. and (sym or quant_method == "awq") ) @dataclass class GPTQMarlinMoEWeight: qweight: torch.Tensor qzeros: torch.Tensor scales: torch.Tensor g_idx: torch.Tensor perm: torch.Tensor is_full_k: bool class GPTQMarlinSparseMoELayer(nn.Module): """ MoE layer that uses a fused GPTQ-Marlin kernel. """ def __init__( self, *, n_expert_group: Optional[int], n_experts: int, prefix: str, renormalize: bool, topk: int, topk_group: Optional[int], weights: Weights, gate_proj_name: str = "gate_proj", up_proj_name: str = "up_proj", down_proj_name: str = "down_proj", scoring_func: Optional[str] = None, e_score_correction_bias: Optional[float] = None, ): assert scoring_func in ( "sigmoid", "softmax", ), f"scoring func {scoring_func} is not handled" super().__init__() if not ( isinstance(weights.loader, GPTQMarlinWeightsLoader) and can_use_marlin_moe_gemm( quant_method=weights.loader.quant_method, quantize=weights.loader.quantize, sym=weights.loader.sym, ) ): raise ValueError( f"Unsupported weights loader: {type(weights.loader)}, only GPTQMarlinWeightsLoader with AWQ and symmetric GPTQ quantization is supported" ) assert (n_expert_group is None) == ( topk_group is None ), "n_expert_group and topk_group must both be None or have some value" self.n_expert_group = n_expert_group self.topk = topk self.topk_group = topk_group self.renormalize = renormalize self.scoring_func = scoring_func self.e_score_correction_bias = e_score_correction_bias self.gate_up_proj = _load_expert_multi_weights_col( prefix=prefix, n_experts=n_experts, names=[gate_proj_name, up_proj_name], weights=weights, ) self.down_proj = _load_expert_weights_row( prefix=prefix, n_experts=n_experts, name=down_proj_name, weights=weights ) self.bits = weights.loader.bits def forward(self, x: torch.Tensor, *, gating_output: torch.Tensor) -> torch.Tensor: return fused_marlin_moe( hidden_states=x, w1=self.gate_up_proj.qweight, w2=self.down_proj.qweight, w1_scale=self.gate_up_proj.scales, w2_scale=self.down_proj.scales, w1_zeros=( self.gate_up_proj.qzeros if self.gate_up_proj.qzeros.numel() > 0 else None ), w2_zeros=( self.down_proj.qzeros if self.down_proj.qzeros.numel() > 0 else None ), g_idx1=self.gate_up_proj.g_idx, g_idx2=self.down_proj.g_idx, sort_indices1=self.gate_up_proj.perm, sort_indices2=self.down_proj.perm, is_k_full=self.gate_up_proj.is_full_k or self.down_proj.is_full_k, gating_output=gating_output, topk=self.topk, renormalize=self.renormalize, use_grouped_topk=self.n_expert_group is not None, num_expert_group=self.n_expert_group, topk_group=self.topk_group, num_bits=self.bits, scoring_func=self.scoring_func, 
e_score_correction_bias=self.e_score_correction_bias, ) def _load_expert_multi_weights_col( *, prefix: str, n_experts: int, names: List[str], weights: Weights, ) -> GPTQMarlinMoEWeight: moe_weight = None for i in range(n_experts): weight = weights.get_multi_weights_col( [f"{prefix}.{i}.{name}" for name in names], 0 ) assert isinstance(weight, GPTQMarlinWeight) moe_weight = _pack_weight( n_experts=n_experts, expert=i, weight=weight, moe_weight=moe_weight ) assert moe_weight is not None return moe_weight def _load_expert_weights_row( *, prefix: str, n_experts: int, name: str, weights: Weights, ) -> GPTQMarlinMoEWeight: moe_weight = None for i in range(n_experts): weight = weights.get_weights_row( f"{prefix}.{i}.{name}", ) assert isinstance(weight, GPTQMarlinWeight) moe_weight = _pack_weight( n_experts=n_experts, expert=i, weight=weight, moe_weight=moe_weight ) assert moe_weight is not None return moe_weight def _pack_weight( *, n_experts: int, expert: int, moe_weight: Optional[GPTQMarlinMoEWeight], weight: GPTQMarlinWeight, ) -> GPTQMarlinMoEWeight: if moe_weight is None: qweight = torch.empty( (n_experts,) + weight.qweight.shape, dtype=weight.qweight.dtype, device=weight.qweight.device, ) qzeros = torch.empty( (n_experts,) + weight.qzeros.shape, dtype=weight.qzeros.dtype, device=weight.qzeros.device, ) scales = torch.empty( (n_experts,) + weight.scales.shape, dtype=weight.scales.dtype, device=weight.scales.device, ) g_idx = torch.empty( (n_experts,) + weight.g_idx.shape, dtype=weight.g_idx.dtype, device=weight.g_idx.device, ) perm = torch.empty( (n_experts,) + weight.perm.shape, dtype=weight.perm.dtype, device=weight.perm.device, ) moe_weight = GPTQMarlinMoEWeight( qweight=qweight, qzeros=qzeros, scales=scales, g_idx=g_idx, perm=perm, is_full_k=weight.is_full_k, ) moe_weight.qweight[expert] = weight.qweight moe_weight.qzeros[expert] = weight.qzeros moe_weight.scales[expert] = weight.scales moe_weight.g_idx[expert] = weight.g_idx moe_weight.perm[expert] = weight.perm return moe_weight def fused_marlin_moe( *, hidden_states: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor, w1_scale: Optional[torch.Tensor] = None, w2_scale: Optional[torch.Tensor] = None, gating_output: torch.Tensor, g_idx1: torch.Tensor, g_idx2: torch.Tensor, sort_indices1: torch.Tensor, sort_indices2: torch.Tensor, w1_zeros: Optional[torch.Tensor] = None, w2_zeros: Optional[torch.Tensor] = None, is_k_full: bool, topk: int, renormalize: bool, num_bits: int = 8, use_grouped_topk: bool = False, num_expert_group: Optional[int] = None, custom_routing_function: Optional[Callable] = None, topk_group: Optional[int] = None, scoring_func: Optional[str] = None, e_score_correction_bias: Optional[float] = None, ) -> torch.Tensor: """ This function computes a Mixture of Experts (MoE) layer using two sets of weights, w1 and w2, and top-k gating mechanism. Parameters: - hidden_states (torch.Tensor): The input tensor to the MoE layer. - w1 (torch.Tensor): The first set of expert weights. - w2 (torch.Tensor): The second set of expert weights. - w1_scale (Optional[torch.Tensor]): Optional scale to be used for w1. - w2_scale (Optional[torch.Tensor]): Optional scale to be used for w2. - gating_output (torch.Tensor): The output of the gating operation (before softmax). - g_idx1 (torch.Tensor): The first set of act_order indices. - g_idx2 (torch.Tensor): The second set of act_order indices. - sort_indices1 (torch.Tensor): The first act_order input permutation. - sort_indices2 (torch.Tensor): The second act_order input permutation. 
- w1_zeros (Optional[torch.Tensor]): Optional zero points to be used for w1. - w2_zeros (Optional[torch.Tensor]): Optional zero points to be used for w2. - topk (int): The number of experts each token is routed to. - renormalize (bool): If True, renormalize the top-k weights to sum to 1. - num_bits (int): The number of bits in expert weights quantization. Returns: - torch.Tensor: The output tensor after applying the MoE layer. """ # Check constraints. assert hidden_states.shape[0] == gating_output.shape[0], "Number of tokens mismatch" assert hidden_states.shape[1] == w1.shape[1] * 16, "Hidden size mismatch w1" assert hidden_states.shape[1] == w2.shape[2] // ( num_bits // 2 ), "Hidden size mismatch w2" assert gating_output.shape[1] == w1.shape[0], "Number of experts mismatch" assert hidden_states.is_contiguous(), "Hidden_states must be contiguous" assert w1.is_contiguous(), "Expert weights1 must be contiguous" assert w2.is_contiguous(), "Expert weights2 must be contiguous" assert hidden_states.dtype == torch.float16 assert num_bits in [4, 8] # DeepSeekV2 uses grouped_top_k if use_grouped_topk: assert topk_group is not None assert num_expert_group is not None topk_weights, topk_ids = moe_kernels.grouped_topk( hidden_states=hidden_states, gating_output=gating_output, topk=topk, renormalize=renormalize, num_expert_group=num_expert_group, topk_group=topk_group, scoring_func=scoring_func, e_score_correction_bias=e_score_correction_bias, ) elif custom_routing_function is None: topk_weights, topk_ids = moe_kernels.fused_topk( hidden_states=hidden_states, gating_output=gating_output, topk=topk, renormalize=renormalize, ) else: topk_weights, topk_ids = custom_routing_function( hidden_states=hidden_states, gating_output=gating_output, topk=topk, renormalize=renormalize, ) return moe_kernels.fused_marlin_moe( hidden_states=hidden_states, w1=w1, w2=w2, w1_scale=w1_scale, w2_scale=w2_scale, gating_output=gating_output, topk_weights=topk_weights, topk_ids=topk_ids, g_idx1=g_idx1, g_idx2=g_idx2, sort_indices1=sort_indices1, sort_indices2=sort_indices2, w1_zeros=w1_zeros, w2_zeros=w2_zeros, num_bits=num_bits, is_k_full=is_k_full, )
text-generation-inference/server/text_generation_server/layers/moe/gptq_marlin.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/layers/moe/gptq_marlin.py", "repo_id": "text-generation-inference", "token_count": 5580 }
326
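One pattern worth noting in the file above: `_pack_weight` allocates its `(n_experts, ...)` buffers lazily, sized from the first expert tensor it sees, then fills one expert slot per call. The same idea in isolation (the `stack_experts` helper is hypothetical, for illustration only):

import torch

# Lazily allocate a stacked (n_experts, *shape) buffer from the first expert,
# then copy each expert into its slot -- the shape is unknown until load time.
def stack_experts(expert_tensors):
    stacked = None
    for i, t in enumerate(expert_tensors):
        if stacked is None:
            stacked = torch.empty(
                (len(expert_tensors),) + t.shape, dtype=t.dtype, device=t.device
            )
        stacked[i] = t
    return stacked

experts = [torch.randn(16, 32) for _ in range(4)]
packed = stack_experts(experts)
assert packed.shape == (4, 16, 32)

Stacking everything into one contiguous tensor is what lets the fused Marlin kernel index experts through a single leading dimension instead of looping over per-expert weight tensors.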
# coding=utf-8 # Copyright 2024 HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed from torch import nn from typing import Optional, List, Tuple import copy from text_generation_server.layers import ( TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, get_linear, SpeculativeHead, TensorParallelMultiAdapterLinear, TensorParallelAdapterRowLinear, ) import torch.nn.functional as F from text_generation_server.models.custom_modeling.vlm import ( load_text_model, load_vision_model, ) from text_generation_server.layers.attention.kv_cache import get_kv_scales from text_generation_server.layers.rotary import PositionRotaryEmbedding from text_generation_server.layers.layernorm import ( FastRMSNorm, ) from text_generation_server.models.globals import ATTENTION from text_generation_server.utils.weights import UnquantizedWeight from transformers.activations import ACT2FN from text_generation_server.layers.attention import ( paged_attention, attention, Seqlen, ) ATTENTION_TYPE_GLOBAL = "global" ATTENTION_TYPE_LOCAL = "local_sliding" class Gemma3FastRMSNorm(FastRMSNorm): @classmethod def load(cls, prefix: str, weights, eps=1e-6): dtype = weights.dtype weights.dtype = torch.float32 weight = weights.get_tensor(f"{prefix}.weight") + 1 weights.dtype = dtype new = cls(weight, eps) new.dtype = dtype return new # perform the multiplication in full precision and downcast after def forward(self, hidden_states, residual=None): if residual is not None: hidden_states += residual residual = hidden_states hidden_states = hidden_states.to(torch.float32) variance = hidden_states.pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) hidden_states = hidden_states * self.weight return hidden_states.to(self.dtype), residual def load_attention(config, prefix: str, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, ) def _load_gqa(config, prefix: str, weights): assert config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, ) if isinstance(weight, UnquantizedWeight): weight.weight = weight.weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.head_dim num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.weight.shape)} != {[(num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear(get_linear(weight, bias=None)) class 
FlashGemma3Attention(torch.nn.Module): def __init__( self, prefix: str, config, weights, layer_id, causal: bool, is_sliding: bool ): super().__init__() self.num_heads = config.num_attention_heads self.head_size = config.head_dim self.causal = causal if is_sliding: self.window_size = config.sliding_window # TODO: remove this hack to support local sliding window config = copy.deepcopy(config) config.rope_scaling = dict(rope_type="default") self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=config.head_dim, base=config.rope_local_base_freq, device=weights.device, ) else: self.window_size = -1 self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=config.head_dim, base=config.rope_theta, device=weights.device, ) self.softmax_scale = ( config.query_pre_attn_scalar**-0.5 if config.query_pre_attn_scalar is not None else None ) if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.softcap = None # config.attn_logit_softcapping query_key_value = load_attention(config, prefix, weights) self.query_key_value = TensorParallelMultiAdapterLinear.load( query_key_value, layer_id, ["q_proj", "k_proj", "v_proj"], sizes=[ self.head_size * config.num_attention_heads, self.head_size * config.num_key_value_heads, self.head_size * config.num_key_value_heads, ], process_group=weights.process_group, ) self.kv_scales = get_kv_scales(weights, f"{prefix}") o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=False, ) self.o_proj = TensorParallelAdapterRowLinear.load( o_proj, layer_id, "o_proj", process_group=weights.process_group, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) self.q_norm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.q_norm", weights=weights, eps=config.rms_norm_eps ) self.k_norm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.k_norm", weights=weights, eps=config.rms_norm_eps ) self.enable_gqa = self.num_heads != self.num_key_value_heads def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data, attention_mask, ): qkv = self.query_key_value(hidden_states, adapter_data) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) kv = kv.view(-1, 2, self.num_key_value_heads * self.head_size) key = kv[:, 0] value = kv[:, 1] query = query.reshape(-1, self.head_size) key = key.reshape(-1, self.head_size) query, _ = self.q_norm(query.contiguous()) key, _ = self.k_norm(key.contiguous()) query = query.view(-1, self.num_heads, self.head_size) key = key.view(-1, self.num_key_value_heads, self.head_size) value = value.view(-1, self.num_key_value_heads, self.head_size) self.rotary_emb(query, key, cos, sin) kv_cache.store( key=key, value=value, slots=slots, kv_scales=self.kv_scales, ) # Prefill if cu_seqlen_prefill is not None: if attention_mask is None or ATTENTION == "flashinfer": # flash attention attn_output = attention( query=query, key=key, value=value, kv_cache=kv_cache, kv_scales=self.kv_scales, seqlen=seqlen, block_tables=block_tables, 
softmax_scale=self.softmax_scale, window_size_left=self.window_size, softcap=self.softcap, ) else: lengths = cu_seqlen_prefill[1:] - cu_seqlen_prefill[:-1] # Split tensors using vectorized split query_list = torch.split(query, lengths.tolist(), dim=0) key_list = torch.split(key, lengths.tolist(), dim=0) value_list = torch.split(value, lengths.tolist(), dim=0) padded_query = torch.nn.utils.rnn.pad_sequence( query_list, batch_first=True ) padded_key = torch.nn.utils.rnn.pad_sequence(key_list, batch_first=True) padded_value = torch.nn.utils.rnn.pad_sequence( value_list, batch_first=True ) padded_query = padded_query.transpose(1, 2).contiguous() padded_key = padded_key.transpose(1, 2).contiguous() padded_value = padded_value.transpose(1, 2).contiguous() # Compute attention attn_output = F.scaled_dot_product_attention( padded_query, padded_key, padded_value, attn_mask=attention_mask, scale=self.softmax_scale, enable_gqa=self.enable_gqa, ) attn_output = attn_output.transpose( 1, 2 ) # [batch_size, seq_len, num_heads, head_dim] max_seq_len = padded_query.size(2) seq_range = torch.arange( max_seq_len, device=padded_query.device ).unsqueeze(0) lengths_tensor = torch.tensor( lengths, device=padded_query.device ).unsqueeze(1) mask = seq_range < lengths_tensor # [batch, max_seq_len] attn_output = attn_output[mask] # [total_seq_len, num_heads, head_dim] # Decode else: attn_output = paged_attention( query, kv_cache, self.kv_head_mapping, self.softmax_scale, block_tables, seqlen, max_s, softcap=self.softcap, kv_scales=self.kv_scales, window_size_left=self.window_size, ) return self.o_proj( attn_output.view(-1, self.num_heads * self.head_size), adapter_data ) class Gemma3MLP(nn.Module): def __init__(self, prefix, config, weights, layer_id): super().__init__() act = config.hidden_activation self.act = ( ACT2FN[act] if "gelu" not in act else lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) ) # Fuse gate and up proj gate_up_proj = TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.gate_proj", f"{prefix}.up_proj"], weights=weights, dim=0, bias=False, ) self.gate_up_proj = TensorParallelMultiAdapterLinear.load( gate_up_proj, layer_id, ["gate_proj", "up_proj"], sizes=[ config.intermediate_size, config.intermediate_size, ], process_group=weights.process_group, ) down_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.down_proj", weights=weights, bias=False, ) self.down_proj = TensorParallelAdapterRowLinear.load( down_proj, layer_id, "down_proj", process_group=weights.process_group, ) self.intermediate_size = ( config.intermediate_size // weights.process_group.size() ) def forward(self, hidden_states, adapter_data): gate_up_states = self.gate_up_proj(hidden_states, adapter_data) gate_up_states = gate_up_states.view(-1, 2, self.intermediate_size) return self.down_proj( self.act(gate_up_states[:, 0]) * gate_up_states[:, 1], adapter_data ) class FlashGemma3Layer(nn.Module): def __init__( self, prefix: str, config, weights, layer_id, causal: bool, is_sliding: bool ): super().__init__() self.self_attn = FlashGemma3Attention( prefix=f"{prefix}.self_attn", config=config, weights=weights, layer_id=layer_id, causal=causal, is_sliding=is_sliding, ) self.mlp = Gemma3MLP( prefix=f"{prefix}.mlp", config=config, weights=weights, layer_id=layer_id ) self.input_layernorm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) self.post_attention_layernorm = Gemma3FastRMSNorm.load( 
prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, ) self.pre_feedforward_layernorm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.pre_feedforward_layernorm", weights=weights, eps=config.rms_norm_eps, ) self.post_feedforward_layernorm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.post_feedforward_layernorm", weights=weights, eps=config.rms_norm_eps, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data, attention_mask, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data, attention_mask, ) # faster post attention rms norm normed_attn_res_output, _ = self.post_attention_layernorm(attn_output) normed_attn_res_output = normed_attn_res_output + res res = normed_attn_res_output pre_normed, _ = self.pre_feedforward_layernorm(normed_attn_res_output) mlp_output = self.mlp(pre_normed, adapter_data) post_hidden_states, _ = self.post_feedforward_layernorm(mlp_output) return post_hidden_states, normed_attn_res_output class FlashGemma3Model(torch.nn.Module): def __init__(self, prefix: str, config, weights, causal: bool): super().__init__() process_group = weights.process_group self.tp_rank = process_group.rank() self.tp_world_size = process_group.size() self.layers = nn.ModuleList( [ FlashGemma3Layer( prefix=f"{prefix}.layers.{layer_id}", config=config, weights=weights, layer_id=layer_id, causal=causal, is_sliding=bool((layer_id + 1) % config.sliding_window_pattern), ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.norm", weights=weights, eps=config.rms_norm_eps ) self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, adapter_data: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, attention_mask_local: Optional[torch.Tensor] = None, ) -> torch.Tensor: hidden_states = inputs_embeds # Get rotary cos and sin for this forward # Avoid to index in each layer residual = None for i, layer in enumerate(self.layers): cos, sin = self.layers[i].self_attn.rotary_emb.get_cos_sin( position_ids, max_s, hidden_states.dtype ) hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, seqlen, max_s, adapter_data, ( attention_mask if self.layers[i].self_attn.window_size == -1 else attention_mask_local ), ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashGemma3ForCausalLM(torch.nn.Module): def __init__(self, prefix: str, config, weights, *, causal: bool = True): super().__init__() embed_norm = config.hidden_size**0.5 if not prefix: prefix = "model" else: prefix = f"{prefix}.model" self.embed_tokens = TensorParallelEmbedding( prefix=f"{prefix}.embed_tokens", weights=weights ) self.embed_tokens.weight *= embed_norm self.model = FlashGemma3Model( prefix=prefix, config=config, weights=weights, causal=causal ) self.lm_head = SpeculativeHead.load( prefix=( f"{prefix}.embed_tokens" if 
config.tie_word_embeddings else f"{prefix}.lm_head" ), config=config, weights=weights, ) # self.softcap = config.attn_logit_softcapping # assert isinstance(self.softcap, float) self.softcap = None def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, adapter_data: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: input_embeds = self.embed_tokens(input_ids) hidden_states = self.model( input_embeds, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, seqlen, max_s, adapter_data, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.lm_head(hidden_states) return logits, speculative_logits class Gemma3MultimodalInputProjection(torch.nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.mm_input_projection_weight = weights.get_tensor( "multi_modal_projector.mm_input_projection_weight" ) self.mm_soft_emb_norm = Gemma3FastRMSNorm.load( prefix=f"{prefix}.mm_soft_emb_norm", weights=weights, eps=config.vision_config.layer_norm_eps, ) self.patches_per_image = int( config.vision_config.image_size // config.vision_config.patch_size ) self.tokens_per_side = int(config.mm_tokens_per_image**0.5) self.kernel_size = self.patches_per_image // self.tokens_per_side self.avg_pool = nn.AvgPool2d( kernel_size=self.kernel_size, stride=self.kernel_size ) def forward(self, vision_outputs: torch.Tensor): batch_size, _, seq_length = vision_outputs.shape reshaped_vision_outputs = vision_outputs.transpose(1, 2) reshaped_vision_outputs = reshaped_vision_outputs.reshape( batch_size, seq_length, self.patches_per_image, self.patches_per_image ) reshaped_vision_outputs = reshaped_vision_outputs.contiguous() pooled_vision_outputs = self.avg_pool(reshaped_vision_outputs) pooled_vision_outputs = pooled_vision_outputs.flatten(2) pooled_vision_outputs = pooled_vision_outputs.transpose(1, 2) normed_vision_outputs, _ = self.mm_soft_emb_norm(pooled_vision_outputs) projected_vision_outputs = torch.matmul( normed_vision_outputs, self.mm_input_projection_weight ) return projected_vision_outputs.type_as(vision_outputs) class Gemma3ForConditionalGeneration(nn.Module): def __init__(self, prefix, config, weights): super().__init__() self.config = config if config.vision_config is not None: config.vision_config.quantize = config.quantize self.post_vision_model_layernorm = nn.LayerNorm.load( prefix="vision_tower.vision_model.post_layernorm", weights=weights, eps=config.vision_config.layer_norm_eps, ) self.multimodal_projector = Gemma3MultimodalInputProjection( prefix="multi_modal_projector", config=config, weights=weights, ) text_config = config.text_config text_config.speculator = config.speculator text_config.quantize = config.quantize self.vision_model = load_vision_model( prefix="vision_tower" if not prefix else f"{prefix}.vision_tower", config=config.vision_config, weights=weights, ) self.text_model = load_text_model( prefix="language_model" if not prefix else f"{prefix}.language_model", config=config.text_config, weights=weights, ) else: config.text_config.quantize = config.quantize config.text_config.speculator = config.speculator self.text_model = load_text_model( prefix=prefix, config=config.text_config, weights=weights, ) 
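        # Fall back to -1 so comparisons against pad_token_id remain well-defined when the config declares no pad token.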
self.pad_token_id = ( config.pad_token_id if config.pad_token_id is not None else -1 ) self.dtype = weights.dtype def get_attention_mask( self, input_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], dtype: torch.dtype, bool_mask: bool = False, ): image_token_mask = (input_ids == self.config.image_token_index).to( input_ids.device ) device = input_ids.device min_dtype = torch.finfo(dtype).min lengths = (cu_seqlen_prefill[1:] - cu_seqlen_prefill[:-1]).tolist() batch_size = len(lengths) sequence_length = max(lengths) target_length = sequence_length # Create the padding mask from the computed lengths. # pad_mask: [batch, sequence_length] where True indicates valid tokens. seq_range = torch.arange(sequence_length, device=device).unsqueeze(0) lengths_tensor = torch.tensor(lengths, device=device).unsqueeze(1) pad_mask = seq_range < lengths_tensor # shape: [batch, sequence_length] # Build the base causal mask (for non-image tokens): causal_mask = torch.tril( torch.ones( (sequence_length, sequence_length), dtype=torch.bool, device=device ) ) base_mask = pad_mask.unsqueeze(2) & pad_mask.unsqueeze( 1 ) # [batch, sequence_length, sequence_length] base_mask = base_mask & causal_mask.unsqueeze(0) # apply causal constraint image_token_mask = torch.nn.utils.rnn.pad_sequence( torch.split(image_token_mask, lengths), batch_first=True, padding_value=0 ) bidirectional_mask = image_token_mask.unsqueeze(2) & image_token_mask.unsqueeze( 1 ) # Combine the causal base mask and the bidirectional mask. combined_mask = torch.logical_or( base_mask.unsqueeze(1), bidirectional_mask.unsqueeze(1) ).to(device) # combined_mask now has shape [batch, 1, sequence_length, sequence_length] full_attention_mask = torch.zeros( (batch_size, 1, sequence_length, target_length), device=device, dtype=torch.bool, ) full_attention_mask[:, :, :, :sequence_length] = combined_mask if bool_mask: return full_attention_mask else: return torch.where(full_attention_mask, 0, min_dtype).to(device) def get_vision_embeds( self, pixel_values: torch.FloatTensor, pixel_attention_mask: Optional[torch.FloatTensor] = None, image_sizes: Optional[torch.Tensor] = None, image_grid_thw: Optional[torch.LongTensor] = None, ): pixel_values = pixel_values.to(dtype=self.dtype) image_outputs = self.vision_model(pixel_values) vision_outputs = self.post_vision_model_layernorm( image_outputs.last_hidden_state ) image_features = self.multimodal_projector(vision_outputs) image_features = image_features.view(-1, image_features.shape[-1]) return image_features def get_inputs_embeds( self, input_ids: torch.Tensor, vision_embeds: torch.Tensor = None, ): inputs_embeds = self.text_model.embed_tokens(input_ids) if vision_embeds is not None: # Replace the image token embeddings with the vision features image_token_mask = (input_ids == self.config.image_token_index).to( input_ids.device ) inputs_embeds[image_token_mask] = vision_embeds.view( -1, vision_embeds.shape[-1] ) return inputs_embeds def forward( self, inputs_embeds: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, prefill_cache_indices: Optional[torch.Tensor] = None, lm_head_indices: Optional[torch.Tensor] = None, pixel_values: torch.FloatTensor = None, # Unused here attention_mask: Optional[torch.BoolTensor] = None, adapter_data: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: if cu_seqlen_prefill is not None: max_s 
+= 1 position_ids += 1 # Use flash attention for text-only input # else: # if cu_seqlen_prefill is not None: # min_dtype = torch.finfo(inputs_embeds.dtype).min # lengths = (cu_seqlen_prefill[1:] - cu_seqlen_prefill[:-1]).tolist() # # Determine the maximum sequence length (after padding) from query. # sequence_length = max(lengths) # target_length = sequence_length # # Create the padding mask from the computed lengths. # # pad_mask: [batch, sequence_length] where True indicates valid tokens. # seq_range = torch.arange( # sequence_length, device=input_ids.device # ).unsqueeze(0) # lengths_tensor = torch.tensor( # lengths, device=input_ids.device # ).unsqueeze(1) # pad_mask = seq_range < lengths_tensor # shape: [batch, sequence_length] # # Build the base causal mask (for non-image tokens): # causal_mask = torch.tril( # torch.ones( # (sequence_length, sequence_length), # dtype=torch.bool, # device=input_ids.device, # ) # ) # base_mask = pad_mask.unsqueeze(2) & pad_mask.unsqueeze( # 1 # ) # [batch, sequence_length, sequence_length] # base_mask = base_mask & causal_mask.unsqueeze(0) # attention_mask = base_mask.unsqueeze( # 1 # ) # [batch, 1, sequence_length, sequence_length] # full_attention_mask = torch.zeros( # (len(lengths), 1, sequence_length, target_length), # device=input_ids.device, # dtype=torch.bool, # ) # full_attention_mask[:, :, :, :sequence_length] = attention_mask # attention_mask = torch.where(full_attention_mask, 0, min_dtype).to( # input_ids.device # ) if attention_mask is not None: min_dtype = torch.finfo(inputs_embeds.dtype).min # prefill may be larger than sliding window effective_seq_len = max( position_ids.shape[0], self.config.text_config.sliding_window ) sliding_window_mask = torch.tril( torch.ones_like(attention_mask, dtype=torch.bool), diagonal=-self.config.text_config.sliding_window, ) attention_mask_local = torch.where( sliding_window_mask, min_dtype, attention_mask ) offset = max(0, position_ids.shape[0] - effective_seq_len) attention_mask_local = attention_mask_local[ :, :, :, offset : offset + effective_seq_len ] else: attention_mask_local = None hidden_states = self.text_model.model( inputs_embeds=inputs_embeds, position_ids=position_ids, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, attention_mask=attention_mask, attention_mask_local=attention_mask_local, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits, speculative_logits = self.text_model.lm_head(hidden_states) # pad logit with 1 zero logit for the image token if pixel_values is not None: logits = torch.cat( [logits, torch.zeros(logits.size(0), 1, device=logits.device)], dim=1 ) if speculative_logits is not None: speculative_logits = torch.cat( [ speculative_logits, torch.zeros( speculative_logits.size(0), 1, device=speculative_logits.device, ), ], dim=1, ) return logits, speculative_logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_gemma3_modeling.py", "repo_id": "text-generation-inference", "token_count": 16595 }
327
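The subtlest logic in the file above is `get_attention_mask`: text tokens attend causally, while image tokens additionally attend to each other bidirectionally, so an image's token block is fully connected. A self-contained sketch of that union (the token ids and image marker below are invented for illustration; single sequence, no padding):

import torch

# Causal mask OR a bidirectional block over image tokens, as in
# Gemma3ForConditionalGeneration.get_attention_mask.
IMAGE = 99  # made-up image token id
input_ids = torch.tensor([5, IMAGE, IMAGE, IMAGE, 7, 8])
S = input_ids.shape[0]

causal = torch.tril(torch.ones(S, S, dtype=torch.bool))
is_image = input_ids == IMAGE
bidirectional = is_image.unsqueeze(1) & is_image.unsqueeze(0)  # image<->image pairs
mask = causal | bidirectional

print(mask.int())
# Row 1 (the first image token) can attend to positions 2 and 3 even though they
# come later in the sequence: the image block is bidirectional while text stays causal.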
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch OPT model.""" import random from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel from transformers import OPTConfig from text_generation_server.layers import ( FastLinear, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) EPS = 1e-5 # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full( (tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device, ) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat( [ torch.zeros( tgt_len, past_key_values_length, dtype=dtype, device=device ), mask, ], dim=-1, ) return mask[None, None, :, :].expand( bsz, 1, tgt_len, tgt_len + past_key_values_length ) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill( inverted_mask.to(torch.bool), torch.finfo(dtype).min ) class OPTLearnedPositionalEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. 
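    Following the original fairseq implementation, position ids are shifted by an offset of 2 before the embedding lookup, because the first two rows of the trained table are reserved.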
""" def __init__(self, prefix: str, weights): super().__init__() self.offset = 2 self.weight = nn.Parameter( weights.get_tensor( f"{prefix if prefix else ''}decoder.embed_positions.weight" ) ) def forward( self, attention_mask: torch.LongTensor, past_key_values_length: int = 0 ): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = attention_mask.long() # create positions depending on attention_mask positions = ( torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask ).long() - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return torch.nn.functional.embedding(positions + self.offset, self.weight) class OPTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config, prefix, weights, is_decoder: bool = False, bias: bool = True, process_group=None, ): super().__init__() hidden_size = config.hidden_size num_heads = config.num_attention_heads self.hidden_size = hidden_size self.num_heads = num_heads self.dropout = config.dropout self.head_dim = hidden_size // num_heads if (self.head_dim * num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder process_group = weights.process_group if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // process_group.size() self.hidden_size = self.hidden_size // process_group.size() self.q_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q_proj", weights=weights, bias=bias ) self.k_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k_proj", weights=weights, bias=bias ) self.v_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v_proj", weights=weights, bias=bias ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=bias ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return ( tensor.view(bsz, seq_len, self.num_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = 
self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask ) attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min) ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 if attn_weights.dtype == torch.float16: attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(torch.float16) else: attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( bsz, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            )
            attn_weights = attn_weights_reshaped.view(
                bsz * self.num_heads, tgt_len, src_len
            )
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(
            attn_weights, p=self.dropout, training=self.training
        )

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `hidden_size` from the config (stored in the class) rather than inferring it
        # from `hidden_states`, because `attn_output` can be partitioned across GPUs when using
        # tensor parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.hidden_size)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value


class OPTDecoderLayer(nn.Module):
    def __init__(self, layer_id: int, prefix: str, config: OPTConfig, weights):
        super().__init__()
        self.process_group = weights.process_group
        self.hidden_size = config.hidden_size
        self.self_attn = OPTAttention(
            config,
            prefix=f"{prefix}.self_attn",
            weights=weights,
            is_decoder=True,
            bias=config.enable_bias,
        )
        self.do_layer_norm_before = config.do_layer_norm_before
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]

        self.self_attn_layer_norm = nn.LayerNorm.load(
            prefix=f"{prefix}.self_attn_layer_norm", weights=weights, eps=EPS
        )
        self.fc1 = TensorParallelColumnLinear.load(
            config, prefix=f"{prefix}.fc1", weights=weights, bias=config.enable_bias
        )
        self.fc2 = TensorParallelRowLinear.load(
            config, prefix=f"{prefix}.fc2", weights=weights, bias=config.enable_bias
        )
        self.final_layer_norm = nn.LayerNorm.load(
            prefix=f"{prefix}.final_layer_norm", weights=weights, eps=EPS
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
    ) -> Tuple[
        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
    ]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)`
            attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative
                values.
            layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of
                size `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout( hidden_states, p=self.dropout, training=self.training ) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout( hidden_states, p=self.dropout, training=self.training ) hidden_states = (residual + hidden_states).view(hidden_states_shape) # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class OPTPreTrainedModel(PreTrainedModel): config_class = OPTConfig class OPTDecoder(OPTPreTrainedModel): def __init__(self, prefix: str, config: OPTConfig, weights): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size prefix = prefix + "." 
if prefix else "" self.embed_tokens = TensorParallelEmbedding( prefix=f"{prefix}decoder.embed_tokens", weights=weights ) self.embed_positions = OPTLearnedPositionalEmbedding(prefix, weights) if config.word_embed_proj_dim != config.hidden_size: self.project_out = FastLinear.load( config, prefix=f"{prefix}decoder.project_out", weights=weights, bias=False, ) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = FastLinear.load( config, prefix=f"{prefix}decoder.project_in", weights=weights, bias=False, ) else: self.project_in = None # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = nn.LayerNorm.load( prefix=f"{prefix}decoder.final_layer_norm", weights=weights, eps=EPS ) else: self.final_layer_norm = None self.layers = nn.ModuleList( [ OPTDecoderLayer( layer_id, prefix=f"{prefix}decoder.layers.{layer_id}", config=config, weights=weights, ) for layer_id in range(config.num_hidden_layers) ] ) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask( self, attention_mask, input_shape, inputs_embeds, past_key_values_length ): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask( attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ).to(inputs_embeds.device) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`. Contains pre-computed
            hidden-states (key and values in the self-attention blocks) that can be used (see
            `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
            of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
            representation. This is useful if you want more control over how to convert `input_ids` indices
            into associated vectors than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under
            returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned
            tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache

        return_dict = (
            return_dict if return_dict is not None else self.config.use_return_dict
        )

        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError(
                "You have to specify either decoder_input_ids or decoder_inputs_embeds"
            )

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        batch_size, seq_length = input_shape
        past_key_values_length = (
            past_key_values[0][0].shape[2] if past_key_values is not None else 0
        )
        # required mask seq length can be calculated via length of past
        mask_seq_length = past_key_values_length + seq_length

        # embed positions
        if attention_mask is None:
            attention_mask = torch.ones(
                batch_size, mask_seq_length, device=inputs_embeds.device
            )
        causal_attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )

        pos_embeds = self.embed_positions(attention_mask, past_key_values_length)

        if self.project_in is not None:
            inputs_embeds = self.project_in(inputs_embeds)

        hidden_states = inputs_embeds + pos_embeds

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in
zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = ( past_key_values[idx] if past_key_values is not None else None ) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class OPTModel(OPTPreTrainedModel): def __init__(self, prefix: str, config: OPTConfig, weights): super().__init__(config) self.decoder = OPTDecoder(prefix, config, weights) # Initialize weights and apply final processing def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs return BaseModelOutputWithPast( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) class OPTForCausalLM(OPTPreTrainedModel): def __init__(self, prefix, config, weights): super().__init__(config) if not prefix and any(s.startswith("model") for s in 
weights.routing.keys()): prefix = "model" self.model = OPTModel(prefix, config, weights) self.lm_head = SpeculativeHead.load( config, prefix=f"{prefix + '.' if prefix else ''}decoder.embed_tokens", weights=weights, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits, speculative_logits = self.lm_head(outputs.last_hidden_state) loss = None return ( CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ), speculative_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs, ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple( past_state.index_select(0, beam_idx) for past_state in layer_past ), ) return reordered_past
text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py", "repo_id": "text-generation-inference", "token_count": 15911 }
328
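The OPT decoder above combines two masks: a causal mask built from the target length (`_make_causal_mask`) and an expanded padding mask (`_expand_mask`), added together so a position is blocked if either mask blocks it. A toy illustration with reduced shapes and an invented padding pattern:

```python
import torch

dtype = torch.float32
min_val = torch.finfo(dtype).min
tgt_len = 4

# Causal part: position i may attend to j <= i (same fill trick as above).
causal = torch.full((tgt_len, tgt_len), min_val)
cond = torch.arange(tgt_len) < (torch.arange(tgt_len) + 1).view(-1, 1)
causal.masked_fill_(cond, 0.0)

# Padding part: invert a [bsz, seq_len] 0/1 mask into additive form.
pad = torch.tensor([[0, 1, 1, 1]], dtype=dtype)  # first slot is padding
inverted = (1.0 - pad)[:, None, None, :].expand(1, 1, tgt_len, tgt_len)
expanded = inverted.masked_fill(inverted.bool(), min_val)

combined = causal[None, None] + expanded
print((combined == 0).int()[0, 0])  # 1 = attend: causal AND not padding
```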
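Another subtle piece of the same file is `OPTLearnedPositionalEmbedding`, which derives position ids from the attention mask so left-padding does not shift them, then adds a fixed offset of 2 because OPT's embedding table reserves its first two rows. A small numeric sketch with an invented mask:

```python
import torch

# Left-padded batch of one sequence: two pad slots, three real tokens.
attention_mask = torch.tensor([[0, 0, 1, 1, 1]])

# Cumulative count of real tokens, zeroed at pad slots, minus one: pads end
# up at -1 and real tokens count 0, 1, 2 regardless of the padding.
positions = (torch.cumsum(attention_mask, dim=1) * attention_mask) - 1
print(positions)      # tensor([[-1, -1,  0,  1,  2]])

# OPT then adds its offset of 2 before the embedding lookup.
print(positions + 2)  # tensor([[1, 1, 2, 3, 4]])
```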
import math from typing import List, Optional import torch from opentelemetry import trace from transformers import AutoTokenizer, AutoModelForCausalLM import transformers.modeling_utils from text_generation_server.models.flash_causal_lm import FlashCausalLM from text_generation_server.utils import initialize_torch_distributed from text_generation_server.layers.attention import paged_attention, attention, Seqlen from text_generation_server.layers.attention.kv_cache import KVScales, KVCache from text_generation_server.models.globals import ATTENTION from text_generation_server.utils.import_utils import SYSTEM tracer = trace.get_tracer(__name__) def tgi_flash_attention_forward( module, query_states: torch.Tensor, key_states: torch.Tensor, value_states: torch.Tensor, attention_mask: Optional[torch.Tensor], # This is a positional arg in Transformers kv_cache: List[KVCache], kv_head_mapping: torch.Tensor, slots: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], seqlen: Seqlen, block_tables: torch.Tensor, max_s: int, kv_scales: KVScales, softmax_scale: Optional[float] = None, sliding_window: Optional[int] = None, softcap: Optional[float] = None, **kwargs, # This is needed to "absorb" other args passed by Transformers modeling ): kv_cache = kv_cache[module.layer_idx] query_states = query_states.transpose(1, 2).squeeze(dim=0) key_states = key_states.transpose(1, 2).squeeze(dim=0) value_states = value_states.transpose(1, 2).squeeze(dim=0) # Take care of updating the cache in-place kv_cache.store(key=key_states, value=value_states, slots=slots, kv_scales=kv_scales) _, num_heads, head_dim = query_states.shape softmax_scale = 1 / math.sqrt(head_dim) if softmax_scale is None else softmax_scale sliding_window = -1 if sliding_window is None else sliding_window if cu_seqlen_prefill is not None: attn_output = attention( query=query_states, key=key_states, value=value_states, kv_cache=kv_cache, kv_scales=kv_scales, seqlen=seqlen, block_tables=block_tables, softmax_scale=softmax_scale, window_size_left=sliding_window, softcap=softcap, ) else: attn_output = paged_attention( query_states, kv_cache, kv_head_mapping, softmax_scale, block_tables, seqlen, max_s, kv_scales=kv_scales, softcap=softcap, window_size_left=sliding_window, ) attn_output = attn_output.view(-1, num_heads * head_dim) return attn_output, None transformers.modeling_utils.ALL_ATTENTION_FUNCTIONS["tgi"] = tgi_flash_attention_forward # The base TP plan of these models has replicated q/k/v. This means that each process will see the full states, # hence we should not divide the number of heads by the world size. This is a known waste of VRAM (the cache # will be fully replicated on each process) and GPU communication (additional all-gather operations), however due # to internal constraints it was not (yet?) 
possible to circumvent REPLICATED_ATTENTION_MODELS = [ "olmo2", "phi3", ] class TransformersFlashCausalLM(FlashCausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, speculator: Optional[str] = None, dtype: Optional[torch.dtype] = None, default_dtype=torch.float16, trust_remote_code: bool = False, tokenizer_class=AutoTokenizer, kv_cache_dtype: Optional[torch.dtype] = None, ): self.quantize = quantize self.process_group, rank, world_size = initialize_torch_distributed() if speculator: raise RuntimeError("Speculator decoding is not enabled for AutoModel") if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = default_dtype if dtype is None else dtype elif SYSTEM == "ipex": if hasattr(torch, "xpu") and torch.xpu.is_available(): device = torch.device(f"xpu:{rank}") else: device = torch.device("cpu") dtype = default_dtype if dtype is None else dtype else: raise ValueError( "Flash `Transformers` modeling backend is not available on cpu." ) tokenizer = tokenizer_class.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) model = AutoModelForCausalLM.from_pretrained( model_id, revision=revision, torch_dtype=dtype, load_in_8bit=quantize == "bitsandbytes", trust_remote_code=trust_remote_code, attn_implementation="tgi", device_map=device if world_size == 1 else None, tp_plan="auto" if world_size > 1 else None, ) torch.distributed.barrier(group=self.process_group) if tokenizer.pad_token_id is None: if model.config.pad_token_id is not None: tokenizer.pad_token_id = model.config.pad_token_id elif model.config.eos_token_id is not None and isinstance( model.config.eos_token_id, int ): tokenizer.pad_token_id = model.config.eos_token_id elif tokenizer.eos_token_id is not None: tokenizer.pad_token_id = tokenizer.eos_token_id else: tokenizer.add_special_tokens({"pad_token": "[PAD]"}) self.num_layers = model.config.num_hidden_layers self.num_heads = model.config.num_attention_heads self.num_kv_heads = model.config.num_key_value_heads # Some models use GQA and different sizes for o_proj # and q_proj, that allows for that. 
if hasattr(model.config, "head_dim"): self.head_size = model.config.head_dim else: self.head_size = ( model.config.hidden_size // model.config.num_attention_heads ) # Skip it for models in the exception list if model.config.model_type not in REPLICATED_ATTENTION_MODELS: self.num_heads = self.num_heads // self.process_group.size() self.num_kv_heads = ( self.num_kv_heads // self.process_group.size() if self.num_kv_heads > 1 else self.num_kv_heads ) self.cuda_graphs = {} self.kv_cache = [] self.kv_cache_dtype = dtype if kv_cache_dtype is None else kv_cache_dtype if ATTENTION == "flashinfer": from text_generation_server.layers.attention.flashinfer import ( create_prefill_state, create_decode_state, create_prefill_with_paged_kv_state, ) self.prefill_state = create_prefill_state(device=device) self.prefill_with_paged_kv_state = create_prefill_with_paged_kv_state( device=device ) self.decode_state = create_decode_state( device=device, num_heads=self.num_heads, num_kv_heads=self.num_kv_heads, ) self.num_groups = self.num_heads // self.num_kv_heads # Those will never change and will be used in the forwards self.kv_head_mapping = torch.arange( 0, self.num_kv_heads, dtype=torch.int32, device=device ).repeat_interleave(self.num_groups) # This means no scale self.kv_scales = KVScales( torch.tensor(1.0, device=device), torch.tensor(1.0, device=device), ) # Skip FlashCausalLM init. super(FlashCausalLM, self).__init__( model_id=model_id, model=model, tokenizer=tokenizer, requires_padding=False, dtype=dtype, device=device, rank=rank, world_size=world_size, ) # Monkey patch of `self.model.forward` to match `FlashCausalLM`. It avoids duplicating a lot of code # We first copy the original model.forward because we still need it in the monkey patch self.model.original_forward = self.model.forward self.model.forward = self._model_forward torch.distributed.barrier(group=self.process_group) @classmethod def fallback( cls, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, speculator: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): return cls( model_id=model_id, revision=revision, quantize=quantize, speculator=speculator, dtype=dtype, trust_remote_code=trust_remote_code, ) def _model_forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[KVCache], block_tables: torch.Tensor, slots: torch.Tensor, seqlen: Seqlen, max_s: int, lm_head_indices: Optional[torch.Tensor], prefill_cache_indices=None, # not used, but passed to match original signature adapter_data=None, # not supported, but passed to match original signature ): # A value of `None` (i.e. no logit slicing) translates to `0` in Transformers logits_to_keep = lm_head_indices if lm_head_indices is not None else 0 # This is equivalent to `self.model.forward`, see the monkey patch in __init__ logits = self.model.original_forward( input_ids=input_ids.unsqueeze(0), # expand dim to fit Transformers position_ids=position_ids.unsqueeze(0), # expand dim to fit Transformers past_key_values=None, # we use self.kv_cache instead of transformers cache object use_cache=False, # we use self.kv_cache instead of transformers cache object logits_to_keep=logits_to_keep, return_dict=True, cu_seqlen_prefill=cu_seqlen_prefill, kv_cache=kv_cache, block_tables=block_tables, slots=slots, seqlen=seqlen, max_s=max_s, kv_head_mapping=self.kv_head_mapping, kv_scales=self.kv_scales, ).logits.squeeze(dim=0) return logits, None
text-generation-inference/server/text_generation_server/models/transformers_flash_causal_lm.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/transformers_flash_causal_lm.py", "repo_id": "text-generation-inference", "token_count": 4996 }
329
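The backend above works by registering a custom attention implementation under a new key (`ALL_ATTENTION_FUNCTIONS["tgi"] = ...`) and then loading the model with `attn_implementation="tgi"`. A stripped-down sketch of the same registration pattern — the function body is a plain SDPA stand-in for TGI's paged/flash kernels, and the key name is ours, not part of the library:

```python
import torch
import transformers.modeling_utils

def toy_attention(module, query, key, value, attention_mask, **kwargs):
    # Simplified: ignores TGI's paged KV cache and the scaling kwarg, and
    # just runs PyTorch scaled-dot-product attention on [b, h, s, d] inputs.
    out = torch.nn.functional.scaled_dot_product_attention(
        query, key, value, attn_mask=attention_mask
    )
    # Transformers expects (batch, seq, heads, head_dim) plus optional weights.
    return out.transpose(1, 2), None

# Same trick as the "tgi" registration above, under a hypothetical key.
transformers.modeling_utils.ALL_ATTENTION_FUNCTIONS["toy"] = toy_attention

# A model loaded with attn_implementation="toy" would now route every
# attention call through `toy_attention`.
```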
from functools import lru_cache import math import time import torch from typing import List, Optional, DefaultDict from loguru import logger from typing import Dict from text_generation_server.pb.generate_pb2 import GrammarType from outlines.fsm.guide import RegexGuide from transformers import ( LogitsProcessor, PreTrainedTokenizerBase, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, TypicalLogitsWarper, ) mempool = torch.cuda.graph_pool_handle() if torch.cuda.is_available() else None class StaticWarper: def __init__( self, temperature=1.0, top_k=None, top_p=None, typical_p=None, ): self.warpers = [] if temperature is not None and temperature != 1.0: temperature = float(temperature) self.warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: self.warpers.append(TopKLogitsWarper(top_k=top_k)) if top_p is not None and top_p < 1.0: self.warpers.append(TopPLogitsWarper(top_p=top_p)) if typical_p is not None and typical_p < 1.0: self.warpers.append(TypicalLogitsWarper(mass=typical_p)) self.cuda_graph = None self.static_scores = None self.static_warped_scores = None self.static_next_logprob = None def __call__(self, scores): if torch.cuda.is_available(): if self.cuda_graph is None: self.static_scores = scores self.cuda_graph = torch.cuda.CUDAGraph() with torch.cuda.graph(self.cuda_graph, pool=mempool): local_scores = self.static_scores for warper in self.warpers: local_scores = warper(None, local_scores) self.static_warped_scores = local_scores # Compute logprobs self.static_next_logprob = torch.log_softmax( self.static_warped_scores, -1 ) self.static_scores.copy_(scores) self.cuda_graph.replay() return self.static_warped_scores, self.static_next_logprob # CPU branch for warper in self.warpers: scores = warper(None, scores) return scores, torch.log_softmax(scores, -1) @lru_cache(10) def static_warper( temperature: Optional[float], top_k: Optional[int], top_p: Optional[float], typical_p: Optional[float], ) -> StaticWarper: return StaticWarper( temperature=temperature, top_k=top_k, top_p=top_p, typical_p=typical_p ) class HeterogeneousRepetitionPenaltyLogitsProcessor(LogitsProcessor): r""" [`LogitsProcessor`] enforcing an exponential penalty on repeated sequences. This version allows for a separate value for each sample and runs inplace when possible. It doesn't validate inputs. Args: repetition_penalty (`List[float]`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. """ def __init__(self, penalty: List[float], dtype: torch.dtype, device: torch.device): self.penalty = penalty self.penalty_tensor = torch.tensor( penalty, dtype=dtype, device=device ).unsqueeze(1) def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: score = torch.gather(scores, 1, input_ids) # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability score = torch.where( score < 0, score * self.penalty_tensor, score / self.penalty_tensor ) scores.scatter_(1, input_ids, score) return scores def filter(self, indices): self.penalty = [self.penalty[i] for i in indices] if any([x != 1.0 for x in self.penalty]): self.penalty_tensor = self.penalty_tensor[indices] return self return None class FrequencyPenaltyLogitsProcessor(LogitsProcessor): r""" Frequency penalty as defined by OpenAI Args: penalty (`float`): The parameter for frequency penalty. 0.0 means no penalty. 
""" def __init__(self, penalty: float): self.penalty = penalty def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor ) -> torch.FloatTensor: score = torch.gather(scores, 1, input_ids) # if score < 0 then penalty has to be multiplied to reduce the previous token probability score = -torch.where(score < 0, score * self.penalty, score / self.penalty) # set score to 0 where input_ids is a padding token score *= input_ids.ne(0) return scores.scatter_add_(1, input_ids, score) class HeterogeneousFrequencyPenaltyLogitsProcessor(LogitsProcessor): r""" Frequency penalty as defined by OpenAI in https://platform.openai.com/docs/guides/text-generation/parameter-details Args: frequency_penalty (`List[float]`): The parameter for frequency penalty. 0.0 means no penalty. """ def __init__(self, penalty: List[float], dtype: torch.dtype, device: torch.device): self.penalty = penalty self.penalty_tensor = torch.tensor( penalty, dtype=dtype, device=device ).unsqueeze(1) def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: batch_size, input_size = input_ids.size() vocab_size = scores.size(1) # Calculate the frequency for each token so far token_freq = torch.zeros(batch_size, vocab_size, device=input_ids.device) token_freq.scatter_add_( 1, input_ids, torch.ones_like(input_ids, dtype=torch.float) ) token_freq /= input_size # Apply the frequency penalty to logits scores -= token_freq * self.penalty_tensor return scores def filter(self, indices): self.penalty = [self.penalty[i] for i in indices] if any([x != 0.0 for x in self.penalty]): self.penalty_tensor = self.penalty_tensor[indices] return self return None class HeterogeneousTemperatureLogitsWarper: r""" [`LogitsWarper`] for temperature (exponential scaling output probability distribution). This version allows for a separate value for each sample and runs inplace when possible. It doesn't validate inputs. Args: temperature (`float`): The value used to module the logits distribution. """ def __init__( self, temperature: List[float], dtype: torch.dtype, device: torch.device ): self.temperature = temperature self.temperature_tensor = torch.tensor( temperature, dtype=dtype, device=device ).unsqueeze(1) def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: scores.div_(self.temperature_tensor) return scores def filter(self, indices): self.temperature = [self.temperature[i] for i in indices] if any([x != 1.0 for x in self.temperature]): self.temperature_tensor = self.temperature_tensor[indices] return self return None class HeterogeneousTopPLogitsWarper(LogitsProcessor): """ [`LogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. This version allows for a separate value for each sample and runs inplace when possible. It doesn't validate inputs. Args: top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__( self, top_p: List[float], dtype: torch.dtype, device: torch.device, filter_value: float = -math.inf, min_tokens_to_keep: int = 1, ): self.top_p = top_p self.top_p_opposite = 1 - torch.tensor( top_p, dtype=dtype, device=device ).unsqueeze(1) self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: sorted_logits, sorted_indices = torch.sort(scores, descending=False) probs = sorted_logits.softmax(dim=-1) # This is way faster for some reason for i in range(probs.shape[0]): probs[i] = probs[i].cumsum(dim=-1) # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) sorted_indices_to_remove = probs <= self.top_p_opposite # Keep at least min_tokens_to_keep sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter( 1, sorted_indices, sorted_indices_to_remove ) warped_scores = scores.masked_fill_(indices_to_remove, self.filter_value) return warped_scores def filter(self, indices): self.top_p = [self.top_p[i] for i in indices] if any([x < 1.0 for x in self.top_p]): self.top_p_opposite = self.top_p_opposite[indices] return self return None class HeterogeneousTopKLogitsWarper(LogitsProcessor): r""" [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. This version allows for a separate value for each sample and runs inplace when possible. It doesn't validate inputs. Args: top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__( self, top_k: List[int], device: torch.device, filter_value: float = -math.inf, min_tokens_to_keep: int = 1, ): self.top_k = top_k self.max_top_k = max(top_k) # value - 1 as we will use top_k to index and python uses 0 based numbering self.top_k_tensor = torch.tensor( [max(x - 1, min_tokens_to_keep - 1) for x in top_k], dtype=torch.int64, device=device, ).unsqueeze(1) # 0 is a special value that disables top_k warping for this member of the batch disabled = [x == 0 for x in top_k] if any(disabled): self.top_k_disabled_mask = torch.tensor( disabled, dtype=torch.bool, device=device ).view(-1, 1) else: self.top_k_disabled_mask = None self.filter_value = filter_value def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: # If max_top_k is superior to the vocab, we need to clamp or the warper will fail if scores.size(-1) < self.max_top_k: max_top_k = scores.size(-1) top_k = torch.clamp_max(self.top_k_tensor, max_top_k) else: max_top_k = self.max_top_k top_k = self.top_k_tensor # Get the kth score for each member of the batch kth_scores = torch.gather(torch.topk(scores, max_top_k)[0], 1, top_k) # Mask member of kth_scores that do not want to use top_k warping if self.top_k_disabled_mask is not None: kth_scores.masked_fill_(self.top_k_disabled_mask, self.filter_value) # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = scores < kth_scores scores.masked_fill_(indices_to_remove, self.filter_value) return scores def filter(self, indices): self.top_k = [self.top_k[i] for i in indices] disabled = [x == 0 for x in self.top_k] if not all(disabled): self.top_k_tensor = self.top_k_tensor[indices] self.max_top_k = max(self.top_k) if self.top_k_disabled_mask is not None: self.top_k_disabled_mask = ( self.top_k_disabled_mask[indices] if any(disabled) else None ) return self return None class HeterogeneousTypicalLogitsWarper(LogitsProcessor): r""" [`LogitsWarper`] that performs typical decoding. See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information. This version allows for a separate value for each sample and runs inplace when possible. It doesn't validate inputs. Args: mass (`float`): Value of typical_p between 0 and 1 inclusive, defaults to 0.9. filter_value (`float`, *optional*, defaults to `-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (`int`, *optional*, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__( self, mass: List[float], dtype: torch.dtype, device: torch.device, filter_value: float = -math.inf, min_tokens_to_keep: int = 1, ): self.mass = mass self.mass_tensor = torch.tensor(mass, dtype=dtype, device=device).unsqueeze(1) # 1 is a special value that disables typical_p warping for this member of the batch disabled = [x == 1.0 for x in mass] if any(disabled): self.disabled_mask = torch.tensor(disabled, dtype=torch.bool, device=device) else: self.disabled_mask = None self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: # calculate entropy normalized = torch.nn.functional.log_softmax(scores, dim=-1) p = torch.exp(normalized) ent = -(normalized * p).nansum(-1, keepdim=True) # shift and sort shifted_scores = torch.abs((-normalized) - ent) sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False) sorted_logits = scores.gather(-1, sorted_indices) probs = sorted_logits.softmax(dim=-1) # This is way faster for some reason for i in range(probs.shape[0]): probs[i] = probs[i].cumsum(dim=-1) # Remove tokens with cumulative mass above the threshold last_ind = (probs < self.mass_tensor).sum(dim=1) last_ind[last_ind < 0] = 0 if self.disabled_mask is not None: last_ind.masked_fill_(self.disabled_mask, scores.shape[-1] - 1) sorted_indices_to_remove = sorted_scores > sorted_scores.gather( 1, last_ind.view(-1, 1) ) if self.min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 indices_to_remove = sorted_indices_to_remove.scatter( 1, sorted_indices, sorted_indices_to_remove ) warped_scores = scores.masked_fill_(indices_to_remove, self.filter_value) return warped_scores def filter(self, indices): self.mass = [self.mass[i] for i in indices] disabled = [x == 1.0 for x in self.mass] if not all(disabled): self.mass_tensor = self.mass_tensor[indices] if self.disabled_mask is not None: self.disabled_mask = ( self.disabled_mask[indices] if any(disabled) else None ) return self return None class HeterogeneousProcessorWrapper(LogitsProcessor): r""" A wrapper for logit warpers or processors without heterogeneous parameter support. Args: processors (`Dict[int, LogitsProcessor]`): A mapping of sample indices to logit warpers or processors, to be run sequentially. 
""" def __init__( self, processors: Dict[int, LogitsProcessor], ): self.processors = processors def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: for i, processor in self.processors.items(): scores[i : i + 1] = processor(input_ids[i : i + 1], scores[i : i + 1]) return scores def filter(self, indices): new_processors = {} for i, idx in enumerate(indices): if idx in self.processors: new_processors[i] = self.processors[idx] if new_processors: self.processors = new_processors return self return None class GrammarLogitProcessor(LogitsProcessor): fsm_state: DefaultDict[int, int] fsm: RegexGuide def __init__( self, tokenizer: Optional[PreTrainedTokenizerBase], device: str, grammar: str, grammar_type: GrammarType, ): self.device = device self.tokenizer = GrammarLogitProcessor._cached_adapt_tokenizer(tokenizer) self.fsm = GrammarLogitProcessor._cached_compile_fsm( grammar_type, grammar, self.tokenizer ) def __call__( self, logits: torch.Tensor, fsm_grammar_state: int, ): if fsm_grammar_state == -1 or self.fsm is None: return logits allowed_tokens = self.fsm.get_next_instruction(fsm_grammar_state).tokens mask = torch.full_like(logits, -math.inf) if allowed_tokens is not None: mask[:, allowed_tokens] = 0 biased_scores = logits + mask return biased_scores def advance(self, next_token_id, fsm_grammar_state): return GrammarLogitProcessor._advance( next_token_id, fsm_grammar_state, self.fsm ) @staticmethod def _advance(next_token_id, fsm_grammar_state, fsm): if fsm_grammar_state == -1: return fsm_grammar_state return fsm.get_next_state(fsm_grammar_state, next_token_id) # TODO: move grammar compilation into the router @staticmethod @lru_cache(maxsize=32, typed=True) def _cached_compile_fsm( grammar_type: GrammarType, schema: str, tokenizer: Optional[PreTrainedTokenizerBase], ): start_time = time.time() if grammar_type == GrammarType.GRAMMAR_TYPE_JSON: # JSON schema is compiled by the v3 router. logger.error( "Non-regex grammars must be compiled by the router, grammar won't be enforced" ) # allows everything schema = "(.*?)" fsm = RegexGuide.from_regex(schema, tokenizer) logger.debug(f"Compiled FSM in {time.time() - start_time:.2f}s") return fsm @staticmethod @lru_cache(maxsize=32, typed=True) def _cached_adapt_tokenizer(tokenizer): """Adapt tokenizer to work with the FSM. The API of Outlines tokenizers is slightly different to that of `transformers`. In addition we need to handle the missing spaces to Llama's tokenizer to be able to compile FSMs for this model. 
""" start_time = time.time() tokenizer.vocabulary = tokenizer.get_vocab() tokenizer.special_tokens = set(tokenizer.all_special_tokens) def convert_token_to_string(token: str) -> str: from transformers.file_utils import SPIECE_UNDERLINE string = tokenizer.convert_tokens_to_string([token]) # A hack to handle missing spaces to HF's Llama tokenizers if token.startswith(SPIECE_UNDERLINE) or token == "<0x20>": return " " + string return string tokenizer.convert_token_to_string = convert_token_to_string logger.debug(f"Adapted tokenizer in {time.time() - start_time:.2f}s") return tokenizer class HeterogeneousGrammarLogitProcessor(LogitsProcessor): def __init__(self, tokenizer, device, grammars, grammar_types): self.device = device self.tokenizer = GrammarLogitProcessor._cached_adapt_tokenizer(tokenizer) self.fsms = [] for grammar, grammar_type in zip(grammars, grammar_types): if len(grammar) == 0: self.fsms.append(None) continue fsm = GrammarLogitProcessor._cached_compile_fsm( grammar_type, grammar, self.tokenizer ) self.fsms.append(fsm) def __call__( self, logits: torch.Tensor, fsm_grammar_states: List[int], ): mask = torch.full_like(logits, -math.inf) for i in range(logits.shape[0]): fsm = self.fsms[i] if fsm_grammar_states[i] == -1 or fsm is None: continue allowed_tokens = fsm.get_next_instruction(fsm_grammar_states[i]).tokens if allowed_tokens is not None: mask[i, allowed_tokens] = 0 logits[i] += mask[i] return logits def advance_batch(self, next_token_ids, fsm_grammar_states): return [ GrammarLogitProcessor._advance( next_token_ids[i], fsm_grammar_states[i], self.fsms[i] ) for i in range(len(next_token_ids)) ] def advance_at_index(self, next_token_id, fsm_grammar_state, index): if self.fsms[index] is None: return fsm_grammar_state return GrammarLogitProcessor._advance( next_token_id, fsm_grammar_state, self.fsms[index] ) def filter(self, indices): new_fsms = [] for i in indices: new_fsms.append(self.fsms[i]) self.fsms = new_fsms return self
text-generation-inference/server/text_generation_server/utils/logits_process.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/logits_process.py", "repo_id": "text-generation-inference", "token_count": 9944 }
330
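The heterogeneous repetition-penalty processor above relies on a gather → where → scatter pattern so each sample can carry its own penalty. A tiny worked example with made-up logits shows the effect:

```python
import torch

logits = torch.tensor([[2.0, -1.0, 0.5, 3.0]])
input_ids = torch.tensor([[0, 3]])   # tokens 0 and 3 already appeared
penalty = torch.tensor([[1.2]])      # one penalty value per sample

score = torch.gather(logits, 1, input_ids)  # [[2.0, 3.0]]
# Divide positive scores, multiply negative ones: either way the repeated
# token's probability goes down.
score = torch.where(score < 0, score * penalty, score / penalty)
logits.scatter_(1, input_ids, score)
print(logits)  # tensor([[ 1.6667, -1.0000,  0.5000,  2.5000]])
```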
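Similarly, the top-k warper above avoids a full sort by gathering each sample's k-th best score and masking everything below it. A compact illustration with toy values:

```python
import torch

scores = torch.tensor([[0.1, 2.0, -0.5, 1.5, 0.7]])
top_k = torch.tensor([[2]]) - 1   # 0-based index of the k-th best score

# topk returns the k largest scores in order; gathering at index k-1 picks
# each sample's k-th best value as the cutoff.
kth_scores = torch.gather(torch.topk(scores, 2)[0], 1, top_k)
scores = scores.masked_fill(scores < kth_scores, -float("inf"))
print(scores)  # tensor([[  -inf, 2.0000,   -inf, 1.5000,   -inf]])
```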
<p align="center">
    <br>
    <img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
    <br>
</p>
<p align="center">
    <img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
    <a href="https://github.com/huggingface/tokenizers/blob/main/LICENSE">
        <img alt="GitHub" src="https://img.shields.io/github/license/huggingface/tokenizers.svg?color=blue&cachedrop">
    </a>
    <a href="https://pepy.tech/project/tokenizers">
        <img src="https://pepy.tech/badge/tokenizers/week" />
    </a>
</p>

Provides an implementation of today's most used tokenizers, with a focus on performance and versatility.

## Main features:

 - Train new vocabularies and tokenize, using today's most used tokenizers.
 - Extremely fast (both training and tokenization), thanks to the Rust implementation. Takes less than 20 seconds to tokenize a GB of text on a server's CPU.
 - Easy to use, but also extremely versatile.
 - Designed for research and production.
 - Normalization comes with alignments tracking. It's always possible to get the part of the original sentence that corresponds to a given token.
 - Does all the pre-processing: Truncate, Pad, add the special tokens your model needs.

## Performance

Performance can vary depending on hardware, but running the [~/bindings/python/benches/test_tiktoken.py](bindings/python/benches/test_tiktoken.py) benchmark should give the following on a g6 AWS instance:
![image](https://github.com/user-attachments/assets/2b913d4b-e488-4cbc-b542-f90a6c40643d)

## Bindings

We provide bindings to the following languages (more to come!):
  - [Rust](https://github.com/huggingface/tokenizers/tree/main/tokenizers) (Original implementation)
  - [Python](https://github.com/huggingface/tokenizers/tree/main/bindings/python)
  - [Node.js](https://github.com/huggingface/tokenizers/tree/main/bindings/node)
  - [Ruby](https://github.com/ankane/tokenizers-ruby) (Contributed by @ankane, external repo)

## Installation

You can install from source using:

```bash
pip install git+https://github.com/huggingface/tokenizers.git#subdirectory=bindings/python
```

or install the released versions with

```bash
pip install tokenizers
```

## Quick example using Python:

Choose your model between Byte-Pair Encoding, WordPiece or Unigram and instantiate a tokenizer:

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE

tokenizer = Tokenizer(BPE())
```

You can customize how pre-tokenization (e.g., splitting into words) is done:

```python
from tokenizers.pre_tokenizers import Whitespace

tokenizer.pre_tokenizer = Whitespace()
```

Then training your tokenizer on a set of files just takes two lines of code:

```python
from tokenizers.trainers import BpeTrainer

trainer = BpeTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
tokenizer.train(files=["wiki.train.raw", "wiki.valid.raw", "wiki.test.raw"], trainer=trainer)
```

Once your tokenizer is trained, encode any text with just one line:

```python
output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
# ["Hello", ",", "y", "'", "all", "!", "How", "are", "you", "[UNK]", "?"]
```

Check the [documentation](https://huggingface.co/docs/tokenizers/index) or the [quicktour](https://huggingface.co/docs/tokenizers/quicktour) to learn more!
tokenizers/README.md/0
{ "file_path": "tokenizers/README.md", "repo_id": "tokenizers", "token_count": 1127 }
331
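The README above highlights alignment tracking as a main feature; a short sketch of reading offsets back from an encoding shows what that means in practice (it assumes a tokenizer trained and saved as in the README's example, and the file name is illustrative):

```python
from tokenizers import Tokenizer

# Load a previously trained tokenizer; the path is a placeholder.
tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")

text = "Hello, y'all!"
output = tokenizer.encode(text)
for token, (start, end) in zip(output.tokens, output.offsets):
    # Offsets point back into the *original* string, even after normalization.
    print(token, "->", repr(text[start:end]))
```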
/* eslint-disable */
var globRequire = require;

describe("pipelineExample", () => {
  // This is a hack to let us require using path similar to what the user has to use
  function require(mod: string) {
    if (mod.startsWith("tokenizers")) {
      // let path = mod.slice("tokenizers".length);
      return globRequire("../../");
    } else {
      return globRequire(mod);
    }
  }

  let console = {
    log: (..._args: any[]) => {}
  };

  it("shows pipeline parts", async () => {
    // START reload_tokenizer
    let { Tokenizer } = require("tokenizers");

    let tokenizer = Tokenizer.fromFile("data/tokenizer-wiki.json");
    // END reload_tokenizer
    // START setup_normalizer
    let { sequenceNormalizer, nfdNormalizer, stripAccentsNormalizer } = require("tokenizers");

    let normalizer = sequenceNormalizer([nfdNormalizer(), stripAccentsNormalizer()]);
    // END setup_normalizer
    // START test_normalizer
    let normalized = normalizer.normalizeString("Héllò hôw are ü?") // "Hello how are u?"
    // END test_normalizer
    expect(normalized).toEqual("Hello how are u?");
    // START replace_normalizer
    tokenizer.setNormalizer(normalizer)
    // END replace_normalizer
    // START setup_pre_tokenizer
    let { whitespacePreTokenizer } = require("tokenizers");

    var preTokenizer = whitespacePreTokenizer();
    var preTokenized = preTokenizer.preTokenizeString("Hello! How are you? I'm fine, thank you.");
    // END setup_pre_tokenizer
    expect(preTokenized).toEqual([
      ["Hello", [0, 5]],
      ["!", [5, 6]],
      ["How", [7, 10]],
      ["are", [11, 14]],
      ["you", [15, 18]],
      ["?", [18, 19]],
      ["I", [20, 21]],
      ["'", [21, 22]],
      ['m', [22, 23]],
      ["fine", [24, 28]],
      [",", [28, 29]],
      ["thank", [30, 35]],
      ["you", [36, 39]],
      [".", [39, 40]]
    ]);
    // START combine_pre_tokenizer
    let { sequencePreTokenizer, digitsPreTokenizer } = require("tokenizers");

    var preTokenizer = sequencePreTokenizer([whitespacePreTokenizer(), digitsPreTokenizer(true)]);
    var preTokenized = preTokenizer.preTokenizeString("Call 911!");
    // END combine_pre_tokenizer
    // START replace_pre_tokenizer
    tokenizer.setPreTokenizer(preTokenizer)
    // END replace_pre_tokenizer
    // START setup_processor
    let { templateProcessing } = require("tokenizers");

    tokenizer.setPostProcessor(templateProcessing(
      "[CLS] $A [SEP]",
      "[CLS] $A [SEP] $B:1 [SEP]:1",
      [["[CLS]", 1], ["[SEP]", 2]]
    ));
    // END setup_processor
    // START test_decoding
    let output = await tokenizer.encode("Hello, y'all! How are you 😁 ?");
    console.log(output.getIds());
    // [1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2]

    let decoded = await tokenizer.decode([1, 27253, 16, 93, 11, 5097, 5, 7961, 5112, 6218, 0, 35, 2], true);
    // "Hello , y ' all ! How are you ?"
    // END test_decoding
    expect(decoded).toEqual("Hello , y ' all ! How are you ?");
  });

  it.skip("trains the tokenizer", async () => {
    // START bert_setup_tokenizer
    let { Tokenizer } = require("tokenizers");
    let { WordPiece } = require("tokenizers");

    let bertTokenizer = new Tokenizer(WordPiece.init({}, { unkToken: "[UNK]" }));
    // END bert_setup_tokenizer
    // START bert_setup_normalizer
    let { sequenceNormalizer, lowercaseNormalizer, nfdNormalizer, stripAccentsNormalizer } = require("tokenizers");

    bertTokenizer.setNormalizer(sequenceNormalizer([
      nfdNormalizer(), lowercaseNormalizer(), stripAccentsNormalizer()
    ]))
    // END bert_setup_normalizer
    // START bert_setup_pre_tokenizer
    let { whitespacePreTokenizer } = require("tokenizers");

    bertTokenizer.setPreTokenizer(whitespacePreTokenizer());
    // END bert_setup_pre_tokenizer
    // START bert_setup_processor
    let { templateProcessing } = require("tokenizers");

    bertTokenizer.setPostProcessor(templateProcessing(
      "[CLS] $A [SEP]",
      "[CLS] $A [SEP] $B:1 [SEP]:1",
      [["[CLS]", 1], ["[SEP]", 2]]
    ));
    // END bert_setup_processor
    // START bert_train_tokenizer
    let { wordPieceTrainer } = require("tokenizers");

    let trainer = wordPieceTrainer({
      vocabSize: 30522,
      specialTokens: ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"]
    });
    let files = ["test", "train", "valid"].map(split => `data/wikitext-103-raw/wiki.${split}.raw`);
    bertTokenizer.train(files, trainer);
    bertTokenizer.save("data/bert-wiki.json")
    // END bert_train_tokenizer
  });

  it("shows a full bert example", async () => {
    let { Tokenizer } = require("tokenizers");
    let bertTokenizer = await Tokenizer.fromFile("data/bert-wiki.json")

    // START bert_test_decoding
    let output = await bertTokenizer.encode("Welcome to the 🤗 Tokenizers library.");
    console.log(output.getTokens());
    // ["[CLS]", "welcome", "to", "the", "[UNK]", "tok", "##eni", "##zer", "##s", "library", ".", "[SEP]"]

    var decoded = await bertTokenizer.decode(output.getIds(), true);
    // "welcome to the tok ##eni ##zer ##s library ."
    // END bert_test_decoding
    expect(decoded).toEqual("welcome to the tok ##eni ##zer ##s library .");
    // START bert_proper_decoding
    let { wordPieceDecoder } = require("tokenizers");

    bertTokenizer.setDecoder(wordPieceDecoder());
    var decoded = await bertTokenizer.decode(output.getIds(), true);
    // "welcome to the tokenizers library."
    // END bert_proper_decoding
    expect(decoded).toEqual("welcome to the tokenizers library.");
  });
});
tokenizers/bindings/node/examples/documentation/pipeline.test.ts/0
{ "file_path": "tokenizers/bindings/node/examples/documentation/pipeline.test.ts", "repo_id": "tokenizers", "token_count": 2710 }
332
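The Node test above exercises the documented normalizer → pre-tokenizer → post-processor pipeline. For comparison, the same wiring in the Python bindings looks roughly like this; a sketch, assuming a tokenizer file exists at the same placeholder path as in the test:

```python
from tokenizers import Tokenizer
from tokenizers.normalizers import NFD, StripAccents, Sequence
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.processors import TemplateProcessing

tokenizer = Tokenizer.from_file("data/tokenizer-wiki.json")

# NFD unicode decomposition followed by accent stripping, like the
# sequenceNormalizer([nfdNormalizer(), stripAccentsNormalizer()]) above.
tokenizer.normalizer = Sequence([NFD(), StripAccents()])

# Split on whitespace and punctuation, like whitespacePreTokenizer().
tokenizer.pre_tokenizer = Whitespace()

# Wrap singles and pairs with [CLS]/[SEP], mirroring templateProcessing(...).
tokenizer.post_processor = TemplateProcessing(
    single="[CLS] $A [SEP]",
    pair="[CLS] $A [SEP] $B:1 [SEP]:1",
    special_tokens=[("[CLS]", 1), ("[SEP]", 2)],
)

output = tokenizer.encode("Hello, y'all! How are you 😁 ?")
print(output.tokens)
```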
# `tokenizers-android-arm-eabi`

This is the **armv7-linux-androideabi** binary for `tokenizers`.
tokenizers/bindings/node/npm/android-arm-eabi/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/android-arm-eabi/README.md", "repo_id": "tokenizers", "token_count": 35 }
333
# `tokenizers-linux-x64-gnu`

This is the **x86_64-unknown-linux-gnu** binary for `tokenizers`.
tokenizers/bindings/node/npm/linux-x64-gnu/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/linux-x64-gnu/README.md", "repo_id": "tokenizers", "token_count": 36 }
334
use crate::arc_rwlock_serde;
use crate::tasks::models::{BPEFromFilesTask, WordLevelFromFilesTask, WordPieceFromFilesTask};
use crate::trainers::Trainer;
use ahash::AHashMap;
use napi::bindgen_prelude::*;
use napi_derive::napi;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, RwLock};
use tokenizers as tk;
use tokenizers::models::bpe::{BpeBuilder, Merges};
use tokenizers::models::wordlevel::WordLevelBuilder;
use tokenizers::models::wordpiece::WordPieceBuilder;

#[napi]
#[derive(Clone, Serialize, Deserialize)]
pub struct Model {
  #[serde(flatten, with = "arc_rwlock_serde")]
  pub(crate) model: Option<Arc<RwLock<tk::models::ModelWrapper>>>,
}

impl<M> From<M> for Model
where
  M: Into<tk::models::ModelWrapper>,
{
  fn from(wrapper: M) -> Self {
    Self {
      model: Some(Arc::new(RwLock::new(wrapper.into()))),
    }
  }
}

#[napi(js_name = "BPE")]
pub struct Bpe {}

#[napi]
impl Bpe {
  #[napi(factory, ts_return_type = "Model")]
  pub fn empty() -> Result<Model> {
    let bpe = tk::models::bpe::BPE::default();
    Ok(Model {
      model: Some(Arc::new(RwLock::new(bpe.into()))),
    })
  }

  #[napi(factory, ts_return_type = "Model")]
  pub fn init(
    vocab: HashMap<String, u32>,
    merges: Merges,
    options: Option<BpeOptions>,
  ) -> Result<Model> {
    let options = options.unwrap_or_default();
    let vocab: AHashMap<_, _> = vocab.into_iter().collect();
    let mut builder = tk::models::bpe::BPE::builder().vocab_and_merges(vocab, merges);
    builder = options.apply_to_bpe_builder(builder);
    let model = builder
      .build()
      .map_err(|e| Error::from_reason(e.to_string()))?;

    Ok(Model {
      model: Some(Arc::new(RwLock::new(model.into()))),
    })
  }

  #[napi(ts_return_type = "Promise<Model>")]
  pub fn from_file(
    vocab: String,
    merges: String,
    options: Option<BpeOptions>,
  ) -> AsyncTask<BPEFromFilesTask> {
    let options = options.unwrap_or_default();
    let mut builder = tk::models::bpe::BPE::from_file(&vocab, &merges);
    builder = options.apply_to_bpe_builder(builder);
    AsyncTask::new(BPEFromFilesTask {
      builder: Some(builder),
    })
  }
}

impl tk::Model for Model {
  type Trainer = Trainer;

  fn tokenize(&self, sequence: &str) -> tk::Result<Vec<tk::Token>> {
    self
      .model
      .as_ref()
      .ok_or("Uninitialized Model")?
      .read()
      .unwrap()
      .tokenize(sequence)
  }

  fn token_to_id(&self, token: &str) -> Option<u32> {
    self.model.as_ref()?.read().unwrap().token_to_id(token)
  }

  fn id_to_token(&self, id: u32) -> Option<String> {
    self.model.as_ref()?.read().unwrap().id_to_token(id)
  }

  fn get_vocab(&self) -> HashMap<String, u32> {
    self
      .model
      .as_ref()
      .expect("Uninitialized Model")
      .read()
      .unwrap()
      .get_vocab()
  }

  fn get_vocab_size(&self) -> usize {
    self
      .model
      .as_ref()
      .expect("Uninitialized Model")
      .read()
      .unwrap()
      .get_vocab_size()
  }

  fn save(&self, folder: &Path, name: Option<&str>) -> tk::Result<Vec<PathBuf>> {
    self
      .model
      .as_ref()
      .ok_or("Uninitialized Model")?
      .read()
      .unwrap()
      .save(folder, name)
  }

  fn get_trainer(&self) -> Self::Trainer {
    self
      .model
      .as_ref()
      .expect("Uninitialized Model")
      .read()
      .unwrap()
      .get_trainer()
      .into()
  }
}

#[derive(Default)]
#[napi(object)]
pub struct BpeOptions {
  pub cache_capacity: Option<u32>,
  pub dropout: Option<f64>,
  pub unk_token: Option<String>,
  pub continuing_subword_prefix: Option<String>,
  pub end_of_word_suffix: Option<String>,
  pub fuse_unk: Option<bool>,
  pub byte_fallback: Option<bool>,
}

impl BpeOptions {
  fn apply_to_bpe_builder(self, mut builder: BpeBuilder) -> BpeBuilder {
    if let Some(cache_capacity) = self.cache_capacity {
      builder = builder.cache_capacity(cache_capacity as usize);
    }
    if let Some(dropout) = self.dropout {
      builder = builder.dropout(dropout as f32);
    }
    if let Some(unk_token) = self.unk_token {
      builder = builder.unk_token(unk_token);
    }
    if let Some(continuing_subword_prefix) = self.continuing_subword_prefix {
      builder = builder.continuing_subword_prefix(continuing_subword_prefix);
    }
    if let Some(end_of_word_suffix) = self.end_of_word_suffix {
      builder = builder.end_of_word_suffix(end_of_word_suffix);
    }
    if let Some(fuse_unk) = self.fuse_unk {
      builder = builder.fuse_unk(fuse_unk);
    }
    if let Some(byte_fallback) = self.byte_fallback {
      builder = builder.byte_fallback(byte_fallback);
    }

    builder
  }
}

#[derive(Default)]
#[napi(object)]
pub struct WordPieceOptions {
  pub unk_token: Option<String>,
  pub continuing_subword_prefix: Option<String>,
  pub max_input_chars_per_word: Option<u32>,
}

impl WordPieceOptions {
  fn apply_to_wordpiece_builder(self, mut builder: WordPieceBuilder) -> WordPieceBuilder {
    if let Some(token) = self.unk_token {
      builder = builder.unk_token(token);
    }
    if let Some(prefix) = self.continuing_subword_prefix {
      builder = builder.continuing_subword_prefix(prefix);
    }
    if let Some(max) = self.max_input_chars_per_word {
      builder = builder.max_input_chars_per_word(max as usize);
    }

    builder
  }
}

#[napi]
pub struct WordPiece {}

#[napi]
impl WordPiece {
  #[napi(factory, ts_return_type = "Model")]
  pub fn init(vocab: HashMap<String, u32>, options: Option<WordPieceOptions>) -> Result<Model> {
    let options = options.unwrap_or_default();
    let mut builder = tk::models::wordpiece::WordPiece::builder()
      .vocab(vocab.into_iter().collect::<AHashMap<_, _>>());
    builder = options.apply_to_wordpiece_builder(builder);
    let model = builder
      .build()
      .map_err(|e| Error::from_reason(e.to_string()))?;

    Ok(Model {
      model: Some(Arc::new(RwLock::new(model.into()))),
    })
  }

  #[napi(factory)]
  pub fn empty() -> Model {
    let wordpiece = tk::models::wordpiece::WordPiece::default();
    Model {
      model: Some(Arc::new(RwLock::new(wordpiece.into()))),
    }
  }

  #[napi(ts_return_type = "Promise<Model>")]
  pub fn from_file(
    vocab: String,
    options: Option<WordPieceOptions>,
  ) -> AsyncTask<WordPieceFromFilesTask> {
    let options = options.unwrap_or_default();
    let mut builder = tk::models::wordpiece::WordPiece::from_file(&vocab);
    builder = options.apply_to_wordpiece_builder(builder);
    AsyncTask::new(WordPieceFromFilesTask {
      builder: Some(builder),
    })
  }
}

#[derive(Default)]
#[napi(object)]
pub struct WordLevelOptions {
  pub unk_token: Option<String>,
}

impl WordLevelOptions {
  fn apply_to_wordlevel_builder(self, mut builder: WordLevelBuilder) -> WordLevelBuilder {
    if let Some(token) = self.unk_token {
      builder = builder.unk_token(token);
    }

    builder
  }
}

#[napi]
pub struct WordLevel {}

#[napi]
impl WordLevel {
  #[napi(factory, ts_return_type = "Model")]
  pub fn init(vocab: HashMap<String, u32>, options: Option<WordLevelOptions>) -> Result<Model> {
    let options = options.unwrap_or_default();
    let mut builder = tk::models::wordlevel::WordLevel::builder().vocab(vocab.into_iter().collect());
    builder = options.apply_to_wordlevel_builder(builder);
    let model = builder
      .build()
      .map_err(|e| Error::from_reason(e.to_string()))?;

    Ok(Model {
      model: Some(Arc::new(RwLock::new(model.into()))),
    })
  }

  #[napi(factory)]
  pub fn empty() -> Model {
    let wordlevel = tk::models::wordlevel::WordLevel::default();
    Model {
      model: Some(Arc::new(RwLock::new(wordlevel.into()))),
    }
  }

  #[napi(ts_return_type = "Promise<Model>")]
  pub fn from_file(
    vocab: String,
    options: Option<WordLevelOptions>,
  ) -> AsyncTask<WordLevelFromFilesTask> {
    let options = options.unwrap_or_default();
    let mut builder = tk::models::wordlevel::WordLevel::builder().files(vocab);
    builder = options.apply_to_wordlevel_builder(builder);
    AsyncTask::new(WordLevelFromFilesTask {
      builder: Some(builder),
    })
  }
}

#[derive(Default)]
#[napi(object)]
pub struct UnigramOptions {
  pub unk_id: Option<u32>,
  pub byte_fallback: Option<bool>,
}

#[napi]
pub struct Unigram {}

#[napi]
impl Unigram {
  #[napi(factory, ts_return_type = "Model")]
  pub fn init(vocab: Vec<(String, f64)>, options: Option<UnigramOptions>) -> Result<Model> {
    let options = options.unwrap_or_default();
    let unigram = tk::models::unigram::Unigram::from(
      vocab,
      options.unk_id.map(|u| u as usize),
      options.byte_fallback.unwrap_or(false),
    )
    .map_err(|e| Error::from_reason(e.to_string()))?;

    Ok(Model {
      model: Some(Arc::new(RwLock::new(unigram.into()))),
    })
  }

  #[napi(factory, ts_return_type = "Model")]
  pub fn empty() -> Model {
    let unigram = tk::models::unigram::Unigram::default();
    Model {
      model: Some(Arc::new(RwLock::new(unigram.into()))),
    }
  }
}
tokenizers/bindings/node/src/models.rs/0
{ "file_path": "tokenizers/bindings/node/src/models.rs", "repo_id": "tokenizers", "token_count": 3778 }
335
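The option structs in the binding above (`BpeOptions`, `WordPieceOptions`, ...) forward builder knobs from JavaScript into the Rust builders. The Python bindings expose the same knobs as keyword arguments on the model constructors, so a rough Python analogue of `Bpe.init` with a couple of `BpeOptions` fields set looks like this; the vocab and merges are toy placeholders, just to show the constructor shape:

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE

# Toy in-memory vocab and merge list.
vocab = {"h": 0, "e": 1, "l": 2, "o": 3, "he": 4, "ll": 5, "[UNK]": 6}
merges = [("h", "e"), ("l", "l")]

# `dropout` and `unk_token` correspond to the BpeOptions fields above.
model = BPE(vocab, merges, dropout=0.1, unk_token="[UNK]")
tokenizer = Tokenizer(model)

print(tokenizer.encode("hello").tokens)  # e.g. ["he", "ll", "o"]
```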
[package]
name = "tokenizers-python"
version = "0.21.4-dev.0"
authors = ["Anthony MOI <m.anthony.moi@gmail.com>"]
edition = "2021"

[lib]
name = "tokenizers"
crate-type = ["cdylib"]

[dependencies]
rayon = "1.10"
serde = { version = "1.0", features = ["rc", "derive"] }
serde_json = "1.0"
libc = "0.2"
env_logger = "0.11"
pyo3 = { version = "0.25", features = ["abi3", "abi3-py39", "py-clone"] }
numpy = "0.25"
ndarray = "0.16"
itertools = "0.14"
ahash = { version = "0.8.11", features = ["serde"] }

[dependencies.tokenizers]
path = "../../tokenizers"

[dev-dependencies]
tempfile = "3.10"
pyo3 = { version = "0.25", features = ["auto-initialize"] }

[features]
default = ["pyo3/extension-module"]
tokenizers/bindings/python/Cargo.toml/0
{ "file_path": "tokenizers/bindings/python/Cargo.toml", "repo_id": "tokenizers", "token_count": 302 }
336
from .base_tokenizer import BaseTokenizer
from .bert_wordpiece import BertWordPieceTokenizer
from .byte_level_bpe import ByteLevelBPETokenizer
from .char_level_bpe import CharBPETokenizer
from .sentencepiece_bpe import SentencePieceBPETokenizer
from .sentencepiece_unigram import SentencePieceUnigramTokenizer
tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/implementations/__init__.py", "repo_id": "tokenizers", "token_count": 94 }
337
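The module above re-exports the library's batteries-included tokenizer implementations. One of them in action, as a sketch; the training file name is a placeholder in the same spirit as the README's quick tour:

```python
from tokenizers.implementations import ByteLevelBPETokenizer

# Train a GPT-2-style byte-level BPE tokenizer from scratch.
tokenizer = ByteLevelBPETokenizer()
tokenizer.train(files=["wiki.train.raw"], vocab_size=30000, min_frequency=2)

output = tokenizer.encode("Hello, y'all! How are you?")
print(output.tokens)
```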