# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A subclass of `Trainer` specific to Question-Answering tasks
"""
import math
import time
from typing import Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_xla_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_xla_available():
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
super().__init__(*args, **kwargs)
self.eval_examples = eval_examples
self.post_process_function = post_process_function
# def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
def evaluate(
self,
eval_dataset: Optional[Dataset] = None,
eval_examples=None,
ignore_keys: Optional[list[str]] = None,
metric_key_prefix: str = "eval",
**gen_kwargs,
) -> dict[str, float]:
gen_kwargs = gen_kwargs.copy()
# Use legacy argument setting if a) the option is not explicitly passed; and b) the argument is set in the
# training args
if gen_kwargs.get("max_length") is None and self.args.generation_max_length is not None:
gen_kwargs["max_length"] = self.args.generation_max_length
if gen_kwargs.get("num_beams") is None and self.args.generation_num_beams is not None:
gen_kwargs["num_beams"] = self.args.generation_num_beams
self._gen_kwargs = gen_kwargs
eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
eval_dataloader = self.get_eval_dataloader(eval_dataset)
eval_examples = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
eval_dataloader,
description="Evaluation",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node writes the results by default
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
metrics = self.compute_metrics(eval_preds)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
metrics.update(output.metrics)
else:
metrics = output.metrics
if self.args.should_log:
# Only the main node logs the results by default
self.log(metrics)
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def predict(
self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
):
self._gen_kwargs = gen_kwargs.copy()
predict_dataloader = self.get_test_dataloader(predict_dataset)
# Temporarily disable metric computation; we will do it in the loop here.
compute_metrics = self.compute_metrics
self.compute_metrics = None
start_time = time.time()
eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
output = eval_loop(
predict_dataloader,
description="Prediction",
# No point gathering the predictions if there are no metrics, otherwise we defer to
# self.args.prediction_loss_only
prediction_loss_only=True if compute_metrics is None else None,
ignore_keys=ignore_keys,
metric_key_prefix=metric_key_prefix,
)
finally:
self.compute_metrics = compute_metrics
total_batch_size = self.args.eval_batch_size * self.args.world_size
if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
output.metrics.update(
speed_metrics(
metric_key_prefix,
start_time,
num_samples=output.num_samples,
num_steps=math.ceil(output.num_samples / total_batch_size),
)
)
if self.post_process_function is None or self.compute_metrics is None:
return output
predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
metrics = self.compute_metrics(predictions)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(f"{metric_key_prefix}_"):
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
metrics.update(output.metrics)
return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
--- transformers/examples/pytorch/question-answering/trainer_seq2seq_qa.py ---
#!/usr/bin/env python
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# /// script
# dependencies = [
# "transformers @ git+https://github.com/huggingface/transformers.git",
# "accelerate >= 0.12.0",
# "datasets >= 1.8.0",
# "sentencepiece != 0.1.92",
# "protobuf",
# "rouge-score",
# "nltk",
# "py7zr",
# "torch >= 1.3",
# "evaluate",
# ]
# ///
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script for your own sequence-to-sequence task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
from datasets import load_dataset
from filelock import FileLock
import transformers
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
MBart50Tokenizer,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.56.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
# A list of all multilingual tokenizers which require the lang attribute.
MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast]
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
resize_position_embeddings: Optional[bool] = field(
default=None,
metadata={
"help": (
"Whether to automatically resize the position embeddings if `max_source_length` exceeds "
"the model's position embeddings."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
summary_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": (
"An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
)
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
num_beams: Optional[int] = field(
default=1,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": (
"The token to force as the first generated token after the decoder_start_token_id. "
"Useful for multilingual models like mBART where the first generated token"
"needs to be the target language token (Usually it is the target language token)"
)
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training, validation, or test file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
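# Maps a dataset name to its (text column, summary column) pair, so that the script can infer
# which columns to use when --text_column / --summary_column are not passed explicitly.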
summarization_name_mapping = {
"amazon_reviews_multi": ("review_body", "review_title"),
"big_patent": ("description", "abstract"),
"cnn_dailymail": ("article", "highlights"),
"orange_sum": ("text", "summary"),
"pn_summary": ("article", "summary"),
"psc": ("extract_text", "summary_text"),
"samsum": ("dialogue", "summary"),
"thaisum": ("body", "summary"),
"xglue": ("news_body", "news_title"),
"xsum": ("document", "summary"),
"wiki_summary": ("article", "highlights"),
"multi_news": ("document", "summary"),
}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_summarization", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"google-t5/t5-small",
"google-t5/t5-base",
"google-t5/t5-large",
"google-t5/t5-3b",
"google-t5/t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full texts and the second column for the
# summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
embedding_size = model.get_input_embeddings().weight.shape[0]
if len(tokenizer) > embedding_size:
model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
if (
hasattr(model.config, "max_position_embeddings")
and model.config.max_position_embeddings < data_args.max_source_length
):
if model_args.resize_position_embeddings is None:
logger.warning(
"Increasing the model's number of position embedding vectors from"
f" {model.config.max_position_embeddings} to {data_args.max_source_length}."
)
model.resize_position_embeddings(data_args.max_source_length)
elif model_args.resize_position_embeddings:
model.resize_position_embeddings(data_args.max_source_length)
else:
raise ValueError(
f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has"
f" {model.config.max_position_embeddings} position encodings. Consider either reducing"
f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the"
" model's position encodings by passing `--resize_position_embeddings`."
)
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
column_names = raw_datasets["validation"].column_names
elif training_args.do_predict:
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
column_names = raw_datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
assert data_args.lang is not None, (
f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument"
)
tokenizer.src_lang = data_args.lang
tokenizer.tgt_lang = data_args.lang
# For multilingual translation models like mBART-50 and M2M100 we need to force the target language token
# as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument.
forced_bos_token_id = (
tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None
)
model.config.forced_bos_token_id = forced_bos_token_id
# Get the column names for input/target.
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if data_args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
text_column = data_args.text_column
if text_column not in column_names:
raise ValueError(
f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.summary_column is None:
summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
summary_column = data_args.summary_column
if summary_column not in column_names:
raise ValueError(
f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
# remove pairs where at least one record is None
inputs, targets = [], []
for i in range(len(examples[text_column])):
if examples[text_column][i] and examples[summary_column][i]:
inputs.append(examples[text_column][i])
targets.append(examples[summary_column][i])
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
if training_args.do_train:
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
predict_dataset = raw_datasets["test"]
if data_args.max_predict_samples is not None:
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
# Metric
metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
# Replace -100s used for padding as we can't decode them
preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
result = {k: round(v * 100, 4) for k, v in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
return result
# Override the decoding parameters of Seq2SeqTrainer
training_args.generation_max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
training_args.generation_num_beams = (
data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
)
# Initialize our Trainer
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
processing_class=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
if isinstance(eval_dataset, dict):
metrics = {}
for eval_ds_name, eval_ds in eval_dataset.items():
dataset_metrics = trainer.evaluate(eval_dataset=eval_ds, metric_key_prefix=f"eval_{eval_ds_name}")
metrics.update(dataset_metrics)
else:
metrics = trainer.evaluate(metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict")
metrics = predict_results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
predictions = predict_results.predictions
predictions = np.where(predictions != -100, predictions, tokenizer.pad_token_id)
predictions = tokenizer.batch_decode(
predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
predictions = [pred.strip() for pred in predictions]
output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt")
with open(output_prediction_file, "w") as writer:
writer.write("\n".join(predictions))
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if data_args.lang is not None:
kwargs["language"] = data_args.lang
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
--- transformers/examples/pytorch/summarization/run_summarization.py ---
#!/usr/bin/env python
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script for your own multiple choice task. Pointers for this are left as comments.
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from typing import Optional
import datasets
import tensorflow as tf
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_NAME,
TF2_WEIGHTS_NAME,
AutoConfig,
AutoTokenizer,
DataCollatorForMultipleChoice,
DefaultDataCollator,
HfArgumentParser,
PushToHubCallback,
TFAutoModelForMultipleChoice,
TFTrainingArguments,
create_optimizer,
set_seed,
)
from transformers.utils import check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.56.0.dev0")
logger = logging.getLogger(__name__)
# region Arguments
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option "
"should only be set to `True` for repositories you trust and in which you have read the code, as it will "
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
# endregion
def main():
# region Argument parsing
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag", model_args, data_args, framework="tensorflow")
output_dir = Path(training_args.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
# endregion
# region Logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# endregion
# region Checkpoints
checkpoint = None
if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
checkpoint = output_dir
logger.info(
f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
" behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
else:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to continue regardless."
)
# endregion
# Set seed before initializing model.
set_seed(training_args.seed)
# region Load datasets
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag",
"regular",
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
# endregion
# region Load model config and tokenizer
if checkpoint is not None:
config_path = training_args.output_dir
elif model_args.config_name:
config_path = model_args.config_name
else:
config_path = model_args.model_name_or_path
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
config_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# endregion
# region Dataset preprocessing
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = list(chain(*first_sentences))
second_sentences = list(chain(*second_sentences))
# Tokenize
tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True, max_length=max_seq_length)
# Un-flatten
data = {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
return data
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
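# When everything is already padded to max length, a simple collator that just converts the
# features to NumPy arrays is enough; otherwise DataCollatorForMultipleChoice pads each batch
# dynamically and handles the nested per-example structure of the 4 answer choices.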
if data_args.pad_to_max_length:
data_collator = DefaultDataCollator(return_tensors="np")
else:
data_collator = DataCollatorForMultipleChoice(tokenizer, return_tensors="tf")
# endregion
with training_args.strategy.scope():
# region Build model
if checkpoint is None:
model_path = model_args.model_name_or_path
else:
model_path = checkpoint
model = TFAutoModelForMultipleChoice.from_pretrained(
model_path,
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
num_replicas = training_args.strategy.num_replicas_in_sync
total_train_batch_size = training_args.per_device_train_batch_size * num_replicas
total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas
if training_args.do_train:
num_train_steps = (len(train_dataset) // total_train_batch_size) * int(training_args.num_train_epochs)
if training_args.warmup_steps > 0:
num_warmup_steps = training_args.warmup_steps
elif training_args.warmup_ratio > 0:
num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
else:
num_warmup_steps = 0
optimizer, lr_schedule = create_optimizer(
init_lr=training_args.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
adam_beta1=training_args.adam_beta1,
adam_beta2=training_args.adam_beta2,
adam_epsilon=training_args.adam_epsilon,
weight_decay_rate=training_args.weight_decay,
adam_global_clipnorm=training_args.max_grad_norm,
)
else:
optimizer = "sgd" # Just write anything because we won't be using it
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=optimizer, metrics=["accuracy"], jit_compile=training_args.xla)
# endregion
# region Preparing push_to_hub and model card
push_to_hub_model_id = training_args.push_to_hub_model_id
model_name = model_args.model_name_or_path.split("/")[-1]
if not push_to_hub_model_id:
push_to_hub_model_id = f"{model_name}-finetuned-multiplechoice"
model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice"}
if training_args.push_to_hub:
callbacks = [
PushToHubCallback(
output_dir=training_args.output_dir,
hub_model_id=push_to_hub_model_id,
hub_token=training_args.push_to_hub_token,
tokenizer=tokenizer,
**model_card_kwargs,
)
]
else:
callbacks = []
# endregion
# region Training
eval_metrics = None
if training_args.do_train:
dataset_options = tf.data.Options()
dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
# model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in
# training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also
# use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names
# yourself if you use this method, whereas they are automatically inferred from the model input names when
# using model.prepare_tf_dataset()
# For more info see the docs:
# https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset
# https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset
tf_train_dataset = model.prepare_tf_dataset(
train_dataset,
shuffle=True,
batch_size=total_train_batch_size,
collate_fn=data_collator,
).with_options(dataset_options)
if training_args.do_eval:
validation_data = model.prepare_tf_dataset(
eval_dataset,
shuffle=False,
batch_size=total_eval_batch_size,
collate_fn=data_collator,
drop_remainder=True,
).with_options(dataset_options)
else:
validation_data = None
history = model.fit(
tf_train_dataset,
validation_data=validation_data,
epochs=int(training_args.num_train_epochs),
callbacks=callbacks,
)
eval_metrics = {key: val[-1] for key, val in history.history.items()}
# endregion
# region Evaluation
if training_args.do_eval and not training_args.do_train:
dataset_options = tf.data.Options()
dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
# Do a standalone evaluation pass
tf_eval_dataset = model.prepare_tf_dataset(
eval_dataset,
shuffle=False,
batch_size=total_eval_batch_size,
collate_fn=data_collator,
drop_remainder=True,
).with_options(dataset_options)
eval_results = model.evaluate(tf_eval_dataset)
eval_metrics = {"val_loss": eval_results[0], "val_accuracy": eval_results[1]}
# endregion
if eval_metrics is not None and training_args.output_dir is not None:
output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
with open(output_eval_file, "w") as writer:
writer.write(json.dumps(eval_metrics))
# region Push to hub
if training_args.output_dir is not None and not training_args.push_to_hub:
# If we're not pushing to hub, at least save a local copy when we're done
model.save_pretrained(training_args.output_dir)
# endregion
if __name__ == "__main__":
main()
--- transformers/examples/tensorflow/multiple-choice/run_swag.py ---
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Translation example
This script shows an example of training a *translation* model with the 🤗 Transformers library.
For straightforward use-cases you may be able to use these scripts without modification, although we have also
included comments in the code to indicate areas that you may need to adapt to your own projects.
### Multi-GPU and TPU usage
By default, these scripts use a `MirroredStrategy` and will use multiple GPUs effectively if they are available. TPUs
can also be used by passing the name of the TPU resource with the `--tpu` argument.
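For example, a TPU run might look like the following sketch, which just adds `--tpu` to the T5 command shown further down; `my-tpu` is a placeholder for the name of your own TPU resource:
```bash
python run_translation.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --source_lang en \
    --target_lang ro \
    --source_prefix "translate English to Romanian: " \
    --dataset_name wmt16 \
    --dataset_config_name ro-en \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir \
    --tpu my-tpu
```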
### Example commands and caveats
MBart and some T5 models require special handling.
T5 models `google-t5/t5-small`, `google-t5/t5-base`, `google-t5/t5-large`, `google-t5/t5-3b` and `google-t5/t5-11b` must use an additional argument: `--source_prefix "translate {source_lang} to {target_lang}"`. For example:
```bash
python run_translation.py \
--model_name_or_path google-t5/t5-small \
--do_train \
--do_eval \
--source_lang en \
--target_lang ro \
--source_prefix "translate English to Romanian: " \
--dataset_name wmt16 \
--dataset_config_name ro-en \
--output_dir /tmp/tst-translation \
--per_device_train_batch_size=16 \
--per_device_eval_batch_size=16 \
--overwrite_output_dir
```
If you get a terrible BLEU score, make sure that you didn't forget to use the `--source_prefix` argument.
For the aforementioned group of T5 models, remember that if you switch to a different language pair, you must adjust the source and target values in all three language-specific command-line arguments: `--source_lang`, `--target_lang` and `--source_prefix`.
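For instance, switching the first T5 command above from English-Romanian to English-German might look like this sketch (it assumes the `de-en` configuration of the same `wmt16` dataset and changes all three arguments together):
```bash
python run_translation.py \
    --model_name_or_path google-t5/t5-small \
    --do_train \
    --do_eval \
    --source_lang en \
    --target_lang de \
    --source_prefix "translate English to German: " \
    --dataset_name wmt16 \
    --dataset_config_name de-en \
    --output_dir /tmp/tst-translation \
    --per_device_train_batch_size=16 \
    --per_device_eval_batch_size=16 \
    --overwrite_output_dir
```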
MBart models require a different format for `--source_lang` and `--target_lang` values, e.g. instead of `en` it expects `en_XX`, for `ro` it expects `ro_RO`. The full MBart specification for language codes can be found [here](https://huggingface.co/facebook/mbart-large-cc25). For example:
```bash
python run_translation.py \
--model_name_or_path facebook/mbart-large-en-ro \
--do_train \
--do_eval \
--dataset_name wmt16 \
--dataset_config_name ro-en \
--source_lang en_XX \
--target_lang ro_RO \
--output_dir /tmp/tst-translation \
--per_device_train_batch_size=16 \
--per_device_eval_batch_size=16 \
--overwrite_output_dir
```
--- transformers/examples/tensorflow/translation/README.md ---
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import copy
import json
import os
import platform
import re
import string
import time
from argparse import ArgumentParser, Namespace
from collections.abc import AsyncIterator
from dataclasses import dataclass, field
from threading import Thread
from typing import Optional
import yaml
from huggingface_hub import AsyncInferenceClient, ChatCompletionStreamOutput
from transformers import (
AutoTokenizer,
GenerationConfig,
PreTrainedTokenizer,
)
from transformers.commands import BaseTransformersCLICommand
from transformers.commands.serving import ServeArguments, ServeCommand
from transformers.utils import is_rich_available, is_torch_available
if platform.system() != "Windows":
import pwd
if is_rich_available():
from rich.console import Console
from rich.live import Live
from rich.markdown import Markdown
if is_torch_available():
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
GenerationConfig,
)
ALLOWED_KEY_CHARS = set(string.ascii_letters + string.whitespace)
ALLOWED_VALUE_CHARS = set(
string.ascii_letters + string.digits + string.whitespace + r".!\"#$%&'()*+,\-/:<=>?@[]^_`{|}~"
)
DEFAULT_EXAMPLES = {
"llama": {"text": "There is a Llama in my lawn, how can I get rid of it?"},
"code": {
"text": (
"Write a Python function that integrates any Python function f(x) numerically over an arbitrary "
"interval [x_start, x_end]."
),
},
"helicopter": {"text": "How many helicopters can a human eat in one sitting?"},
"numbers": {"text": "Count to 10 but skip every number ending with an 'e'"},
"birds": {"text": "Why aren't birds real?"},
"socks": {"text": "Why is it important to eat socks after meditating?"},
"numbers2": {"text": "Which number is larger, 9.9 or 9.11?"},
}
# Printed at the start of a chat session
HELP_STRING_MINIMAL = """
**TRANSFORMERS CHAT INTERFACE**
Chat interface to try out a model. Besides chatting with the model, here are some basic commands:
- **!help**: shows all available commands (set generation settings, save chat, etc.)
- **!status**: shows the current status of the model and generation settings
- **!clear**: clears the current conversation and starts a new one
- **!exit**: closes the interface
"""
# Printed when the user types `!help` in the chat session
HELP_STRING = f"""
**TRANSFORMERS CHAT INTERFACE HELP**
Full command list:
- **!help**: shows this help message
- **!clear**: clears the current conversation and starts a new one
- **!status**: shows the current status of the model and generation settings
- **!example {{NAME}}**: loads example named `{{NAME}}` from the config and uses it as the user input.
Available example names: `{"`, `".join(DEFAULT_EXAMPLES.keys())}`
- **!set {{ARG_1}}={{VALUE_1}} {{ARG_2}}={{VALUE_2}}** ...: changes the system prompt or generation settings (multiple
settings are separated by a space). Accepts the same flags and format as the `generate_flags` CLI argument.
If you're a new user, check this basic flag guide: https://huggingface.co/docs/transformers/llm_tutorial#common-options
- **!save {{SAVE_NAME}} (optional)**: saves the current chat and settings to file by default to
`./chat_history/{{MODEL_NAME}}/chat_{{DATETIME}}.json` or `{{SAVE_NAME}}` if provided
- **!exit**: closes the interface
"""
class RichInterface:
def __init__(self, model_name: Optional[str] = None, user_name: Optional[str] = None):
self._console = Console()
if model_name is None:
self.model_name = "assistant"
else:
self.model_name = model_name
if user_name is None:
self.user_name = "user"
else:
self.user_name = user_name
async def stream_output(self, stream: AsyncIterator[ChatCompletionStreamOutput]) -> tuple[str, int]:
self._console.print(f"[bold blue]<{self.model_name}>:")
with Live(console=self._console, refresh_per_second=4) as live:
text = ""
async for token in await stream:
outputs = token.choices[0].delta.content
request_id = token.id
if not outputs:
continue
# Escapes single words encased in <>, e.g. <think> -> \<think\>, for proper rendering in Markdown.
# It only escapes single words that may have `_`, optionally following a `/` (e.g. </think>)
outputs = re.sub(r"<(/*)(\w*)>", r"\<\1\2\>", outputs)
text += outputs
# Render the accumulated text as Markdown
# NOTE: this is a workaround for the rendering "unstandard markdown"
# in rich. The chatbots output treat "\n" as a new line for
# better compatibility with real-world text. However, rendering
# in markdown would break the format. It is because standard markdown
# treat a single "\n" in normal text as a space.
# Our workaround is adding two spaces at the end of each line.
# This is not a perfect solution, as it would
# introduce trailing spaces (only) in code block, but it works well
# especially for console output, because in general the console does not
# care about trailing spaces.
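                # Illustrative example (not from the original code): the text "Hello\nWorld" becomes
                # "Hello  \nWorld  \n", so Markdown renders the two words on separate lines instead of
                # collapsing the single newline into a space.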
lines = []
for line in text.splitlines():
lines.append(line)
if line.startswith("```"):
# Code block marker - do not add trailing spaces, as it would
# break the syntax highlighting
lines.append("\n")
else:
lines.append(" \n")
markdown = Markdown("".join(lines).strip(), code_theme="github-dark")
# Update the Live console output
live.update(markdown, refresh=True)
self._console.print()
return text, request_id
def input(self) -> str:
"""Gets user input from the console."""
input = self._console.input(f"[bold red]<{self.user_name}>:\n")
self._console.print()
return input
def clear(self):
"""Clears the console."""
self._console.clear()
def print_user_message(self, text: str):
"""Prints a user message to the console."""
self._console.print(f"[bold red]<{self.user_name}>:[/ bold red]\n{text}")
self._console.print()
def print_color(self, text: str, color: str):
"""Prints text in a given color to the console."""
self._console.print(f"[bold {color}]{text}")
self._console.print()
def print_help(self, minimal: bool = False):
"""Prints the help message to the console."""
self._console.print(Markdown(HELP_STRING_MINIMAL if minimal else HELP_STRING))
self._console.print()
def print_status(self, model_name: str, generation_config: GenerationConfig, model_kwargs: dict):
"""Prints the status of the model and generation settings to the console."""
self._console.print(f"[bold blue]Model: {model_name}\n")
if model_kwargs:
self._console.print(f"[bold blue]Model kwargs: {model_kwargs}")
self._console.print(f"[bold blue]{generation_config}")
self._console.print()
@dataclass
class ChatArguments:
r"""
Arguments for the chat CLI.
    See the metadata arg for each argument's description -- the metadata will be printed with
`transformers chat --help`
"""
# General settings
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "Name of the pre-trained model. The positional argument will take precedence if both are passed."
},
)
user: Optional[str] = field(
default=None,
metadata={"help": "Username to display in chat interface. Defaults to the current user's name."},
)
system_prompt: Optional[str] = field(default=None, metadata={"help": "System prompt."})
save_folder: str = field(default="./chat_history/", metadata={"help": "Folder to save chat history."})
examples_path: Optional[str] = field(default=None, metadata={"help": "Path to a yaml file with examples."})
verbose: bool = field(default=False, metadata={"help": "Whether to show runtime warnings in the chat interface."})
# Generation settings
generation_config: Optional[str] = field(
default=None,
metadata={
"help": (
"Path to a local generation config file or to a HuggingFace repo containing a "
"`generation_config.json` file. Other generation settings passed as CLI arguments will be applied on "
"top of this generation config."
),
},
)
# Model loading
model_revision: str = field(
default="main",
metadata={"help": "Specific model version to use (can be a branch name, tag name or commit id)."},
)
device: str = field(default="auto", metadata={"help": "Device to use for inference."})
torch_dtype: Optional[str] = field(
default=None,
metadata={
"help": "`torch_dtype` is deprecated! Please use `dtype` argument instead.",
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
dtype: Optional[str] = field(
default="auto",
metadata={
"help": "Override the default `torch.dtype` and load the model under this dtype. If `'auto'` is passed, "
"the dtype will be automatically derived from the model's weights.",
"choices": ["auto", "bfloat16", "float16", "float32"],
},
)
trust_remote_code: bool = field(
default=False, metadata={"help": "Whether to trust remote code when loading a model."}
)
attn_implementation: Optional[str] = field(
default=None,
metadata={
"help": "Which attention implementation to use; you can run --attn_implementation=flash_attention_2, in "
"which case you must install this manually by running `pip install flash-attn --no-build-isolation`."
},
)
load_in_8bit: bool = field(
default=False,
metadata={"help": "Whether to use 8 bit precision for the base model - works only with LoRA."},
)
load_in_4bit: bool = field(
default=False,
metadata={"help": "Whether to use 4 bit precision for the base model - works only with LoRA."},
)
bnb_4bit_quant_type: str = field(default="nf4", metadata={"help": "Quantization type.", "choices": ["fp4", "nf4"]})
use_bnb_nested_quant: bool = field(default=False, metadata={"help": "Whether to use nested quantization."})
# Serving settings
host: str = field(default="localhost", metadata={"help": "Interface the server will listen to.."})
port: int = field(default=8000, metadata={"help": "Port the server will listen to."})
def __post_init__(self):
"""Only used for BC `torch_dtype` argument."""
# In this case only the BC torch_dtype was given
if self.torch_dtype is not None and self.dtype == "auto":
self.dtype = self.torch_dtype
def chat_command_factory(args: Namespace):
"""
Factory function used to chat with a local model.
"""
return ChatCommand(args)
class ChatCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
        Register this command to argparse so it's available for the `transformers` CLI
Args:
parser: Root parser to register command-specific arguments
"""
dataclass_types = (ChatArguments,)
chat_parser = parser.add_parser("chat", dataclass_types=dataclass_types)
group = chat_parser.add_argument_group("Positional arguments")
group.add_argument(
"model_name_or_path_or_address",
type=str,
default=None,
help="Name of the pre-trained model or address to connect to.",
)
group.add_argument(
"generate_flags",
type=str,
default=None,
help=(
"Flags to pass to `generate`, using a space as a separator between flags. Accepts booleans, numbers, "
"and lists of integers, more advanced parameterization should be set through --generation-config. "
"Example: `transformers chat <model_repo> max_new_tokens=100 do_sample=False eos_token_id=[1,2]`. "
"If you're a new user, check this basic flag guide: "
"https://huggingface.co/docs/transformers/llm_tutorial#common-options"
),
nargs="*",
)
chat_parser.set_defaults(func=chat_command_factory)
def __init__(self, args):
if args.model_name_or_path_or_address is not None:
name = args.model_name_or_path_or_address
if name.startswith("http") or name.startswith("https") or name.startswith("localhost"):
self.spawn_backend = False
if args.host != "localhost" or args.port != 8000:
raise ValueError(
"Looks like you’ve set both a server address and a custom host/port. "
"Please pick just one way to specify the server."
)
args.host, args.port = args.model_name_or_path_or_address.rsplit(":", 1)
if args.model_name_or_path is None:
raise ValueError(
"When connecting to a server, please specify a model name with the --model_name_or_path flag."
)
else:
self.spawn_backend = True
args.model_name_or_path = args.model_name_or_path_or_address
if not is_rich_available() and (not is_torch_available() and self.spawn_backend):
raise ImportError(
"You need to install rich to use the chat interface. Additionally, you have not specified a remote "
"endpoint and are therefore spawning a backend. Torch is required for this: (`pip install rich torch`)"
)
elif not is_rich_available():
raise ImportError("You need to install rich to use the chat interface. (`pip install rich`)")
elif not is_torch_available() and self.spawn_backend:
raise ImportError(
"You have not specified a remote endpoint and are therefore spawning a backend. Torch is required "
"for this: (`pip install rich torch`)"
)
self.args = args
# -----------------------------------------------------------------------------------------------------------------
# Chat session methods
@staticmethod
def get_username() -> str:
"""Returns the username of the current user."""
if platform.system() == "Windows":
return os.getlogin()
else:
return pwd.getpwuid(os.getuid()).pw_name
@staticmethod
def save_chat(chat, args: ChatArguments, filename: Optional[str] = None) -> str:
"""Saves the chat history to a file."""
output_dict = {}
output_dict["settings"] = vars(args)
output_dict["chat_history"] = chat
folder = args.save_folder
if filename is None:
time_str = time.strftime("%Y-%m-%d_%H-%M-%S")
filename = f"{args.model_name_or_path_or_address}/chat_{time_str}.json"
filename = os.path.join(folder, filename)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, "w") as f:
json.dump(output_dict, f, indent=4)
return os.path.abspath(filename)
@staticmethod
def clear_chat_history(system_prompt: Optional[str] = None) -> list[dict]:
"""Clears the chat history."""
if system_prompt is None:
chat = []
else:
chat = [{"role": "system", "content": system_prompt}]
return chat
# -----------------------------------------------------------------------------------------------------------------
# Input parsing methods
def parse_generate_flags(self, generate_flags: list[str]) -> dict:
"""Parses the generate flags from the user input into a dictionary of `generate` kwargs."""
if len(generate_flags) == 0:
return {}
# Assumption: `generate_flags` is a list of strings, each string being a `flag=value` pair, that can be parsed
# into a json string if we:
# 1. Add quotes around each flag name
generate_flags_as_dict = {'"' + flag.split("=")[0] + '"': flag.split("=")[1] for flag in generate_flags}
# 2. Handle types:
# 2. a. booleans should be lowercase, None should be null
generate_flags_as_dict = {
k: v.lower() if v.lower() in ["true", "false"] else v for k, v in generate_flags_as_dict.items()
}
generate_flags_as_dict = {k: "null" if v == "None" else v for k, v in generate_flags_as_dict.items()}
# 2. b. strings should be quoted
def is_number(s: str) -> bool:
# handle negative numbers
if s.startswith("-"):
s = s[1:]
return s.replace(".", "", 1).isdigit()
generate_flags_as_dict = {k: f'"{v}"' if not is_number(v) else v for k, v in generate_flags_as_dict.items()}
# 2. c. [no processing needed] lists are lists of ints because `generate` doesn't take lists of strings :)
# We also mention in the help message that we only accept lists of ints for now.
# 3. Join the result into a comma separated string
generate_flags_string = ", ".join([f"{k}: {v}" for k, v in generate_flags_as_dict.items()])
# 4. Add the opening/closing brackets
generate_flags_string = "{" + generate_flags_string + "}"
# 5. Remove quotes around boolean/null and around lists
generate_flags_string = generate_flags_string.replace('"null"', "null")
generate_flags_string = generate_flags_string.replace('"true"', "true")
generate_flags_string = generate_flags_string.replace('"false"', "false")
generate_flags_string = generate_flags_string.replace('"[', "[")
generate_flags_string = generate_flags_string.replace(']"', "]")
# 6. Replace the `=` with `:`
generate_flags_string = generate_flags_string.replace("=", ":")
try:
processed_generate_flags = json.loads(generate_flags_string)
except json.JSONDecodeError:
raise ValueError(
"Failed to convert `generate_flags` into a valid JSON object."
"\n`generate_flags` = {generate_flags}"
"\nConverted JSON string = {generate_flags_string}"
)
return processed_generate_flags
def get_generation_parameterization(
self, args: ChatArguments, model_generation_config: GenerationConfig
) -> tuple[GenerationConfig, dict]:
"""
Returns a GenerationConfig object holding the generation parameters for the CLI command.
"""
# No generation config arg provided -> use model's default generation config, then apply CLI defaults
if args.generation_config is not None:
if ".json" in args.generation_config: # is a local file
dirname = os.path.dirname(args.generation_config)
filename = os.path.basename(args.generation_config)
generation_config = GenerationConfig.from_pretrained(dirname, filename)
else:
generation_config = GenerationConfig.from_pretrained(args.generation_config)
else:
# !!!!!!!!!
# This is a chat session, so we have a few non-standard defaults
# !!!!!!!!!
generation_config = copy.deepcopy(model_generation_config)
generation_config.update(**{"do_sample": True, "max_new_tokens": 256})
# Finally: parse and apply `generate_flags`
parsed_generate_flags = self.parse_generate_flags(args.generate_flags)
model_kwargs = generation_config.update(**parsed_generate_flags)
# `model_kwargs` contain non-generation flags in `parsed_generate_flags` that should be passed directly to
# `generate`
return generation_config, model_kwargs
@staticmethod
def parse_eos_tokens(
tokenizer: PreTrainedTokenizer,
generation_config: GenerationConfig,
eos_tokens: Optional[str],
eos_token_ids: Optional[str],
) -> tuple[int, list[int]]:
"""Retrieves the pad token ID and all possible EOS token IDs."""
if generation_config.pad_token_id is None:
pad_token_id = generation_config.eos_token_id
else:
pad_token_id = generation_config.pad_token_id
all_eos_token_ids = []
if eos_tokens is not None:
all_eos_token_ids.extend(tokenizer.convert_tokens_to_ids(eos_tokens.split(",")))
if eos_token_ids is not None:
all_eos_token_ids.extend([int(token_id) for token_id in eos_token_ids.split(",")])
if len(all_eos_token_ids) == 0:
all_eos_token_ids.append(generation_config.eos_token_id)
return pad_token_id, all_eos_token_ids
# -----------------------------------------------------------------------------------------------------------------
# Model loading and performance automation methods
@staticmethod
def get_quantization_config(model_args: ChatArguments) -> Optional["BitsAndBytesConfig"]:
if model_args.load_in_4bit:
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
# For consistency with model weights, we use the same value as `dtype`
bnb_4bit_compute_dtype=model_args.dtype,
bnb_4bit_quant_type=model_args.bnb_4bit_quant_type,
bnb_4bit_use_double_quant=model_args.use_bnb_nested_quant,
bnb_4bit_quant_storage=model_args.dtype,
)
elif model_args.load_in_8bit:
quantization_config = BitsAndBytesConfig(
load_in_8bit=True,
)
else:
quantization_config = None
return quantization_config
def load_model_and_tokenizer(self, args: ChatArguments) -> tuple["AutoModelForCausalLM", AutoTokenizer]:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path_positional,
revision=args.model_revision,
trust_remote_code=args.trust_remote_code,
)
dtype = args.dtype if args.dtype in ["auto", None] else getattr(torch, args.dtype)
quantization_config = self.get_quantization_config(args)
model_kwargs = {
"revision": args.model_revision,
"attn_implementation": args.attn_implementation,
"dtype": dtype,
"device_map": "auto",
"quantization_config": quantization_config,
}
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path_positional, trust_remote_code=args.trust_remote_code, **model_kwargs
)
if getattr(model, "hf_device_map", None) is None:
model = model.to(args.device)
return model, tokenizer
# -----------------------------------------------------------------------------------------------------------------
# User commands
def handle_non_exit_user_commands(
self,
user_input: str,
args: ChatArguments,
interface: RichInterface,
examples: dict[str, dict[str, str]],
generation_config: GenerationConfig,
model_kwargs: dict,
chat: list[dict],
    ) -> tuple[list[dict], bool, GenerationConfig, dict]:
"""
Handles all user commands except for `!exit`. May update the chat history (e.g. reset it) or the
generation config (e.g. set a new flag).
"""
valid_command = True
if user_input == "!clear":
chat = self.clear_chat_history(args.system_prompt)
interface.clear()
elif user_input == "!help":
interface.print_help()
elif user_input.startswith("!save") and len(user_input.split()) < 2:
split_input = user_input.split()
if len(split_input) == 2:
filename = split_input[1]
else:
filename = None
filename = self.save_chat(chat, args, filename)
interface.print_color(text=f"Chat saved in {filename}!", color="green")
elif user_input.startswith("!set"):
# splits the new args into a list of strings, each string being a `flag=value` pair (same format as
# `generate_flags`)
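            # Illustrative example (assumption): `!set temperature=0.7 top_k=50` becomes
            # ["temperature=0.7", "top_k=50"], which is then parsed like the `generate_flags` CLI argument.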
new_generate_flags = user_input[4:].strip()
new_generate_flags = new_generate_flags.split()
# sanity check: each member in the list must have an =
for flag in new_generate_flags:
if "=" not in flag:
interface.print_color(
text=(
f"Invalid flag format, missing `=` after `{flag}`. Please use the format "
"`arg_1=value_1 arg_2=value_2 ...`."
),
color="red",
)
break
else:
# parses the new args into a dictionary of `generate` kwargs, and updates the corresponding variables
parsed_new_generate_flags = self.parse_generate_flags(new_generate_flags)
new_model_kwargs = generation_config.update(**parsed_new_generate_flags)
model_kwargs.update(**new_model_kwargs)
elif user_input.startswith("!example") and len(user_input.split()) == 2:
example_name = user_input.split()[1]
if example_name in examples:
interface.clear()
chat = []
interface.print_user_message(examples[example_name]["text"])
chat.append({"role": "user", "content": examples[example_name]["text"]})
else:
example_error = (
f"Example {example_name} not found in list of available examples: {list(examples.keys())}."
)
interface.print_color(text=example_error, color="red")
elif user_input == "!status":
interface.print_status(
model_name=args.model_name_or_path,
generation_config=generation_config,
model_kwargs=model_kwargs,
)
else:
valid_command = False
interface.print_color(text=f"'{user_input}' is not a valid command. Showing help message.", color="red")
interface.print_help()
return chat, valid_command, generation_config, model_kwargs
# -----------------------------------------------------------------------------------------------------------------
# Main logic
def run(self):
asyncio.run(self._inner_run())
async def _inner_run(self):
if self.spawn_backend:
serve_args = ServeArguments(
device=self.args.device,
dtype=self.args.dtype,
trust_remote_code=self.args.trust_remote_code,
attn_implementation=self.args.attn_implementation,
load_in_8bit=self.args.load_in_8bit,
load_in_4bit=self.args.load_in_4bit,
bnb_4bit_quant_type=self.args.bnb_4bit_quant_type,
use_bnb_nested_quant=self.args.use_bnb_nested_quant,
host=self.args.host,
port=self.args.port,
log_level="error",
)
serve_command = ServeCommand(serve_args)
thread = Thread(target=serve_command.run)
thread.daemon = True
thread.start()
model = self.args.model_name_or_path + "@" + self.args.model_revision
host = "http://localhost" if self.args.host == "localhost" else self.args.host
client = AsyncInferenceClient(f"{host}:{self.args.port}")
args = self.args
if args.examples_path is None:
examples = DEFAULT_EXAMPLES
else:
with open(args.examples_path) as f:
examples = yaml.safe_load(f)
if args.user is None:
user = self.get_username()
else:
user = args.user
model_generation_config = GenerationConfig.from_pretrained(args.model_name_or_path)
generation_config, model_kwargs = self.get_generation_parameterization(args, model_generation_config)
interface = RichInterface(model_name=args.model_name_or_path, user_name=user)
interface.clear()
chat = self.clear_chat_history(args.system_prompt)
request_id = None
# Starts the session with a minimal help message at the top, so that a user doesn't get stuck
interface.print_help(minimal=True)
while True:
try:
user_input = interface.input()
# User commands
if user_input.startswith("!"):
# `!exit` is special, it breaks the loop
if user_input == "!exit":
break
else:
chat, valid_command, generation_config, model_kwargs = self.handle_non_exit_user_commands(
user_input=user_input,
args=args,
interface=interface,
examples=examples,
generation_config=generation_config,
model_kwargs=model_kwargs,
chat=chat,
)
# `!example` sends a user message to the model
if not valid_command or not user_input.startswith("!example"):
continue
else:
chat.append({"role": "user", "content": user_input})
stream = client.chat_completion(
chat,
stream=True,
extra_body={
"request_id": request_id,
"generation_config": generation_config.to_json_string(),
"model": model,
},
)
model_output, request_id = await interface.stream_output(stream)
chat.append({"role": "assistant", "content": model_output})
except KeyboardInterrupt:
break
finally:
await client.close()
if __name__ == "__main__":
args = ChatArguments()
args.model_name_or_path_or_address = "meta-llama/Llama-3.2-3b-Instruct"
args.model_name_or_path_or_address = "http://localhost:8000"
chat = ChatCommand(args)
chat.run()
| transformers/src/transformers/commands/chat.py/0 | {
"file_path": "transformers/src/transformers/commands/chat.py",
"repo_id": "transformers",
"token_count": 13581
} | 431 |
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to dynamically load objects from the Hub."""
import ast
import filecmp
import hashlib
import importlib
import importlib.metadata
import importlib.util
import os
import re
import shutil
import signal
import sys
import threading
import warnings
from pathlib import Path
from types import ModuleType
from typing import Any, Optional, Union
from huggingface_hub import try_to_load_from_cache
from packaging import version
from .utils import (
HF_MODULES_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
cached_file,
extract_commit_hash,
is_offline_mode,
logging,
)
from .utils.import_utils import VersionComparison, split_package_version
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
_HF_REMOTE_CODE_LOCK = threading.Lock()
def init_hf_modules():
"""
Creates the cache directory for modules with an init, and adds it to the Python path.
"""
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(HF_MODULES_CACHE)
os.makedirs(HF_MODULES_CACHE, exist_ok=True)
init_path = Path(HF_MODULES_CACHE) / "__init__.py"
if not init_path.exists():
init_path.touch()
importlib.invalidate_caches()
def create_dynamic_module(name: Union[str, os.PathLike]) -> None:
"""
Creates a dynamic module in the cache directory for modules.
Args:
name (`str` or `os.PathLike`):
The name of the dynamic module to create.
"""
init_hf_modules()
dynamic_module_path = (Path(HF_MODULES_CACHE) / name).resolve()
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent)
os.makedirs(dynamic_module_path, exist_ok=True)
init_path = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
# It is extremely important to invalidate the cache when we change stuff in those modules, or users end up
    # with errors about modules that do not exist. Same for all other `invalidate_caches` in this file.
importlib.invalidate_caches()
def get_relative_imports(module_file: Union[str, os.PathLike]) -> list[str]:
"""
Get the list of modules that are relatively imported in a module file.
Args:
module_file (`str` or `os.PathLike`): The module file to inspect.
Returns:
`list[str]`: The list of relative imports in the module.
"""
with open(module_file, encoding="utf-8") as f:
content = f.read()
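    # Illustrative example (not from the original code): a file containing the line
    # "from .configuration_my_model import MyConfig" yields ["configuration_my_model"].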
# Imports of the form `import .xxx`
relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
# Unique-ify
return list(set(relative_imports))
def get_relative_import_files(module_file: Union[str, os.PathLike]) -> list[str]:
"""
Get the list of all files that are needed for a given module. Note that this function recurses through the relative
imports (if a imports b and b imports c, it will return module files for b and c).
Args:
module_file (`str` or `os.PathLike`): The module file to inspect.
Returns:
`list[str]`: The list of all relative imports a given module needs (recursively), which will give us the list
of module files a given module needs.
"""
no_change = False
files_to_check = [module_file]
all_relative_imports = []
# Let's recurse through all relative imports
while not no_change:
new_imports = []
for f in files_to_check:
new_imports.extend(get_relative_imports(f))
module_path = Path(module_file).parent
new_import_files = [f"{str(module_path / m)}.py" for m in new_imports]
files_to_check = [f for f in new_import_files if f not in all_relative_imports]
no_change = len(files_to_check) == 0
all_relative_imports.extend(files_to_check)
return all_relative_imports
def get_imports(filename: Union[str, os.PathLike]) -> list[str]:
"""
Extracts all the libraries (not relative imports this time) that are imported in a file.
Args:
filename (`str` or `os.PathLike`): The module file to inspect.
Returns:
`list[str]`: The list of all packages required to use the input module.
"""
with open(filename, encoding="utf-8") as f:
content = f.read()
imported_modules = set()
import transformers.utils
def recursive_look_for_imports(node):
if isinstance(node, ast.Try):
return # Don't recurse into Try blocks and ignore imports in them
elif isinstance(node, ast.If):
test = node.test
for condition_node in ast.walk(test):
if isinstance(condition_node, ast.Call):
check_function = getattr(condition_node.func, "id", "")
if (
check_function.endswith("available")
and check_function.startswith("is_flash_attn")
or hasattr(transformers.utils.import_utils, check_function)
):
# Don't recurse into "if flash_attn_available()" or any "if library_available" blocks
# that appears in `transformers.utils.import_utils` and ignore imports in them
return
elif isinstance(node, ast.Import):
# Handle 'import x' statements
for alias in node.names:
top_module = alias.name.split(".")[0]
if top_module:
imported_modules.add(top_module)
elif isinstance(node, ast.ImportFrom):
# Handle 'from x import y' statements, ignoring relative imports
if node.level == 0 and node.module:
top_module = node.module.split(".")[0]
if top_module:
imported_modules.add(top_module)
# Recursively visit all children
for child in ast.iter_child_nodes(node):
recursive_look_for_imports(child)
tree = ast.parse(content)
recursive_look_for_imports(tree)
return sorted(imported_modules)
def check_imports(filename: Union[str, os.PathLike]) -> list[str]:
"""
Check if the current Python environment contains all the libraries that are imported in a file. Will raise if a
library is missing.
Args:
filename (`str` or `os.PathLike`): The module file to check.
Returns:
`list[str]`: The list of relative imports in the file.
"""
imports = get_imports(filename)
missing_packages = []
for imp in imports:
try:
importlib.import_module(imp)
except ImportError as exception:
logger.warning(f"Encountered exception while importing {imp}: {exception}")
# Some packages can fail with an ImportError because of a dependency issue.
# This check avoids hiding such errors.
# See https://github.com/huggingface/transformers/issues/33604
if "No module named" in str(exception):
missing_packages.append(imp)
else:
raise
if len(missing_packages) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
)
return get_relative_imports(filename)
def get_class_in_module(
class_name: str,
module_path: Union[str, os.PathLike],
*,
force_reload: bool = False,
) -> type:
"""
Import a module on the cache directory for modules and extract a class from it.
Args:
class_name (`str`): The name of the class to import.
module_path (`str` or `os.PathLike`): The path to the module to import.
force_reload (`bool`, *optional*, defaults to `False`):
Whether to reload the dynamic module from file if it already exists in `sys.modules`.
Otherwise, the module is only reloaded if the file has changed.
Returns:
`typing.Type`: The class looked for.
"""
name = os.path.normpath(module_path)
if name.endswith(".py"):
name = name[:-3]
name = name.replace(os.path.sep, ".")
module_file: Path = Path(HF_MODULES_CACHE) / module_path
with _HF_REMOTE_CODE_LOCK:
if force_reload:
sys.modules.pop(name, None)
importlib.invalidate_caches()
cached_module: Optional[ModuleType] = sys.modules.get(name)
module_spec = importlib.util.spec_from_file_location(name, location=module_file)
# Hash the module file and all its relative imports to check if we need to reload it
module_files: list[Path] = [module_file] + sorted(map(Path, get_relative_import_files(module_file)))
module_hash: str = hashlib.sha256(b"".join(bytes(f) + f.read_bytes() for f in module_files)).hexdigest()
module: ModuleType
if cached_module is None:
module = importlib.util.module_from_spec(module_spec)
# insert it into sys.modules before any loading begins
sys.modules[name] = module
else:
module = cached_module
# reload in both cases, unless the module is already imported and the hash hits
if getattr(module, "__transformers_module_hash__", "") != module_hash:
module_spec.loader.exec_module(module)
module.__transformers_module_hash__ = module_hash
return getattr(module, class_name)
def get_cached_module_file(
pretrained_model_name_or_path: Union[str, os.PathLike],
module_file: str,
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: Optional[bool] = None,
proxies: Optional[dict[str, str]] = None,
token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
repo_type: Optional[str] = None,
_commit_hash: Optional[str] = None,
**deprecated_kwargs,
) -> str:
"""
    Downloads a module from a local folder or a distant repo and returns its path inside the cached
    Transformers module.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a configuration file saved using the
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
module_file (`str`):
The name of the module file containing the class to look for.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
repo_type (`str`, *optional*):
Specify the repo type (useful when downloading from a space for instance).
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Returns:
`str`: The path to the module inside the cache.
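    Examples:
    ```python
    # Illustrative usage only (the repo name is an assumption, mirroring the demo repo used in the
    # `get_class_from_dynamic_module` example elsewhere in this file); any repo exposing a `modeling.py` works.
    module_path = get_cached_module_file("sgugger/my-bert-model", "modeling.py")
    ```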
"""
use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
is_local = os.path.isdir(pretrained_model_name_or_path)
if is_local:
submodule = os.path.basename(pretrained_model_name_or_path)
else:
submodule = pretrained_model_name_or_path.replace("/", os.path.sep)
cached_module = try_to_load_from_cache(
pretrained_model_name_or_path, module_file, cache_dir=cache_dir, revision=_commit_hash, repo_type=repo_type
)
new_files = []
try:
# Load from URL or cache if already cached
resolved_module_file = cached_file(
pretrained_model_name_or_path,
module_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
token=token,
revision=revision,
repo_type=repo_type,
_commit_hash=_commit_hash,
)
if not is_local and cached_module != resolved_module_file:
new_files.append(module_file)
except OSError:
logger.info(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
raise
# Check we have all the requirements in our environment
modules_needed = check_imports(resolved_module_file)
# Now we move the module inside our cached dynamic modules.
full_submodule = TRANSFORMERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(full_submodule)
submodule_path = Path(HF_MODULES_CACHE) / full_submodule
if submodule == os.path.basename(pretrained_model_name_or_path):
# We copy local files to avoid putting too many folders in sys.path. This copy is done when the file is new or
# has changed since last copy.
if not (submodule_path / module_file).exists() or not filecmp.cmp(
resolved_module_file, str(submodule_path / module_file)
):
(submodule_path / module_file).parent.mkdir(parents=True, exist_ok=True)
shutil.copy(resolved_module_file, submodule_path / module_file)
importlib.invalidate_caches()
for module_needed in modules_needed:
module_needed = Path(module_file).parent / f"{module_needed}.py"
module_needed_file = os.path.join(pretrained_model_name_or_path, module_needed)
if not (submodule_path / module_needed).exists() or not filecmp.cmp(
module_needed_file, str(submodule_path / module_needed)
):
shutil.copy(module_needed_file, submodule_path / module_needed)
importlib.invalidate_caches()
else:
# Get the commit hash
commit_hash = extract_commit_hash(resolved_module_file, _commit_hash)
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
submodule_path = submodule_path / commit_hash
full_submodule = full_submodule + os.path.sep + commit_hash
full_submodule_module_file_path = os.path.join(full_submodule, module_file)
create_dynamic_module(Path(full_submodule_module_file_path).parent)
if not (submodule_path / module_file).exists():
shutil.copy(resolved_module_file, submodule_path / module_file)
importlib.invalidate_caches()
        # Make sure we also have every file with relative imports
for module_needed in modules_needed:
if not (submodule_path / f"{module_needed}.py").exists():
get_cached_module_file(
pretrained_model_name_or_path,
f"{module_needed}.py",
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
token=token,
revision=revision,
local_files_only=local_files_only,
_commit_hash=commit_hash,
)
new_files.append(f"{module_needed}.py")
if len(new_files) > 0 and revision is None:
new_files = "\n".join([f"- {f}" for f in new_files])
repo_type_str = "" if repo_type is None else f"{repo_type}s/"
url = f"https://huggingface.co/{repo_type_str}{pretrained_model_name_or_path}"
logger.warning(
f"A new version of the following files was downloaded from {url}:\n{new_files}"
"\n. Make sure to double-check they do not contain any added malicious code. To avoid downloading new "
"versions of the code file, you can pin a revision."
)
return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
class_reference: str,
pretrained_model_name_or_path: Union[str, os.PathLike],
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: Optional[bool] = None,
proxies: Optional[dict[str, str]] = None,
token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
repo_type: Optional[str] = None,
code_revision: Optional[str] = None,
**kwargs,
) -> type:
"""
Extracts a class from a module file, present in the local folder or repository of a model.
<Tip warning={true}>
Calling this function will execute the code in the module file found locally or downloaded from the Hub. It should
therefore only be called on trusted repos.
</Tip>
Args:
class_reference (`str`):
The full name of the class to load, including its module and optionally its repo.
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a configuration file saved using the
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
This is used when `class_reference` does not specify another repo.
module_file (`str`):
The name of the module file containing the class to look for.
class_name (`str`):
The name of the class to import in the module.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the tokenizer configuration from local files.
repo_type (`str`, *optional*):
Specify the repo type (useful when downloading from a space for instance).
code_revision (`str`, *optional*, defaults to `"main"`):
            The specific revision to use for the code on the Hub, if the code lives in a different repository than the
rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based system for
storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Returns:
`typing.Type`: The class, dynamically imported from the module.
Examples:
```python
# Download module `modeling.py` from huggingface.co and cache then extract the class `MyBertModel` from this
# module.
cls = get_class_from_dynamic_module("modeling.MyBertModel", "sgugger/my-bert-model")
# Download module `modeling.py` from a given repo and cache then extract the class `MyBertModel` from this
# module.
cls = get_class_from_dynamic_module("sgugger/my-bert-model--modeling.MyBertModel", "sgugger/another-bert-model")
```"""
use_auth_token = kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
# Catch the name of the repo if it's specified in `class_reference`
if "--" in class_reference:
repo_id, class_reference = class_reference.split("--")
else:
repo_id = pretrained_model_name_or_path
module_file, class_name = class_reference.split(".")
if code_revision is None and pretrained_model_name_or_path == repo_id:
code_revision = revision
# And lastly we get the class inside our newly created module
final_module = get_cached_module_file(
repo_id,
module_file + ".py",
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
token=token,
revision=code_revision,
local_files_only=local_files_only,
repo_type=repo_type,
)
return get_class_in_module(class_name, final_module, force_reload=force_download)
def custom_object_save(obj: Any, folder: Union[str, os.PathLike], config: Optional[dict] = None) -> list[str]:
"""
Save the modeling files corresponding to a custom model/configuration/tokenizer etc. in a given folder. Optionally
adds the proper fields in a config.
Args:
obj (`Any`): The object for which to save the module files.
folder (`str` or `os.PathLike`): The folder where to save.
config (`PretrainedConfig` or dictionary, `optional`):
A config in which to register the auto_map corresponding to this custom object.
Returns:
`list[str]`: The list of files saved.
"""
if obj.__module__ == "__main__":
logger.warning(
f"We can't save the code defining {obj} in {folder} as it's been defined in __main__. You should put "
"this code in a separate module so we can include it in the saved folder and make it easier to share via "
"the Hub."
)
return
def _set_auto_map_in_config(_config):
module_name = obj.__class__.__module__
last_module = module_name.split(".")[-1]
full_name = f"{last_module}.{obj.__class__.__name__}"
# Special handling for tokenizers
if "Tokenizer" in full_name:
slow_tokenizer_class = None
fast_tokenizer_class = None
if obj.__class__.__name__.endswith("Fast"):
                # Fast tokenizer: we have the fast tokenizer class and we may have the slow one as an attribute.
fast_tokenizer_class = f"{last_module}.{obj.__class__.__name__}"
if getattr(obj, "slow_tokenizer_class", None) is not None:
slow_tokenizer = getattr(obj, "slow_tokenizer_class")
slow_tok_module_name = slow_tokenizer.__module__
last_slow_tok_module = slow_tok_module_name.split(".")[-1]
slow_tokenizer_class = f"{last_slow_tok_module}.{slow_tokenizer.__name__}"
else:
# Slow tokenizer: no way to have the fast class
slow_tokenizer_class = f"{last_module}.{obj.__class__.__name__}"
full_name = (slow_tokenizer_class, fast_tokenizer_class)
if isinstance(_config, dict):
auto_map = _config.get("auto_map", {})
auto_map[obj._auto_class] = full_name
_config["auto_map"] = auto_map
elif getattr(_config, "auto_map", None) is not None:
_config.auto_map[obj._auto_class] = full_name
else:
_config.auto_map = {obj._auto_class: full_name}
# Add object class to the config auto_map
if isinstance(config, (list, tuple)):
for cfg in config:
_set_auto_map_in_config(cfg)
elif config is not None:
_set_auto_map_in_config(config)
result = []
# Copy module file to the output folder.
object_file = sys.modules[obj.__module__].__file__
dest_file = Path(folder) / (Path(object_file).name)
shutil.copy(object_file, dest_file)
result.append(dest_file)
# Gather all relative imports recursively and make sure they are copied as well.
for needed_file in get_relative_import_files(object_file):
dest_file = Path(folder) / (Path(needed_file).name)
shutil.copy(needed_file, dest_file)
result.append(dest_file)
return result
def _raise_timeout_error(signum, frame):
raise ValueError(
"Loading this model requires you to execute custom code contained in the model repository on your local "
"machine. Please set the option `trust_remote_code=True` to permit loading of this model."
)
TIME_OUT_REMOTE_CODE = 15
def resolve_trust_remote_code(
trust_remote_code, model_name, has_local_code, has_remote_code, error_message=None, upstream_repo=None
):
"""
Resolves the `trust_remote_code` argument. If there is remote code to be loaded, the user must opt-in to loading
it.
Args:
trust_remote_code (`bool` or `None`):
User-defined `trust_remote_code` value.
model_name (`str`):
The name of the model repository in huggingface.co.
has_local_code (`bool`):
Whether the model has local code.
has_remote_code (`bool`):
Whether the model has remote code.
error_message (`str`, *optional*):
Custom error message to display if there is remote code to load and the user didn't opt-in. If unset, the error
message will be regarding loading a model with custom code.
Returns:
The resolved `trust_remote_code` value.
"""
if error_message is None:
if upstream_repo is not None:
error_message = (
f"The repository {model_name} references custom code contained in {upstream_repo} which "
f"must be executed to correctly load the model. You can inspect the repository "
f"content at https://hf.co/{upstream_repo} .\n"
)
elif os.path.isdir(model_name):
error_message = (
f"The repository {model_name} contains custom code which must be executed "
f"to correctly load the model. You can inspect the repository "
f"content at {os.path.abspath(model_name)} .\n"
)
else:
error_message = (
f"The repository {model_name} contains custom code which must be executed "
f"to correctly load the model. You can inspect the repository "
f"content at https://hf.co/{model_name} .\n"
)
if trust_remote_code is None:
if has_local_code:
trust_remote_code = False
elif has_remote_code and TIME_OUT_REMOTE_CODE > 0:
prev_sig_handler = None
try:
prev_sig_handler = signal.signal(signal.SIGALRM, _raise_timeout_error)
signal.alarm(TIME_OUT_REMOTE_CODE)
while trust_remote_code is None:
answer = input(
f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
f"You can avoid this prompt in future by passing the argument `trust_remote_code=True`.\n\n"
f"Do you wish to run the custom code? [y/N] "
)
if answer.lower() in ["yes", "y", "1"]:
trust_remote_code = True
elif answer.lower() in ["no", "n", "0", ""]:
trust_remote_code = False
signal.alarm(0)
except Exception:
# OS which does not support signal.SIGALRM
raise ValueError(
f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
)
finally:
if prev_sig_handler is not None:
signal.signal(signal.SIGALRM, prev_sig_handler)
signal.alarm(0)
elif has_remote_code:
# For the CI which puts the timeout at 0
_raise_timeout_error(None, None)
if has_remote_code and not has_local_code and not trust_remote_code:
raise ValueError(
f"{error_message} You can inspect the repository content at https://hf.co/{model_name}.\n"
f"Please pass the argument `trust_remote_code=True` to allow custom code to be run."
)
return trust_remote_code
def check_python_requirements(path_or_repo_id, requirements_file="requirements.txt", **kwargs):
"""
Tries to locate `requirements_file` in a local folder or repo, and confirms that the environment has all the
python dependencies installed.
Args:
path_or_repo_id (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a model repo on huggingface.co.
- a path to a *directory* potentially containing the file.
kwargs (`dict[str, Any]`, *optional*):
Additional arguments to pass to `cached_file`.
"""
failed = [] # error messages regarding requirements
try:
requirements = cached_file(path_or_repo_id=path_or_repo_id, filename=requirements_file, **kwargs)
with open(requirements, "r") as f:
requirements = f.readlines()
for requirement in requirements:
requirement = requirement.strip()
if not requirement or requirement.startswith("#"): # skip empty lines and comments
continue
try:
# e.g. "torch>2.6.0" -> "torch", ">", "2.6.0"
package_name, delimiter, version_number = split_package_version(requirement)
except ValueError: # e.g. "torch", as opposed to "torch>2.6.0"
package_name = requirement
delimiter, version_number = None, None
try:
local_package_version = importlib.metadata.version(package_name)
except importlib.metadata.PackageNotFoundError:
failed.append(f"{requirement} (installed: None)")
continue
if delimiter is not None and version_number is not None:
is_satisfied = VersionComparison.from_string(delimiter)(
version.parse(local_package_version), version.parse(version_number)
)
else:
is_satisfied = True
if not is_satisfied:
failed.append(f"{requirement} (installed: {local_package_version})")
except OSError: # no requirements.txt
pass
if failed:
raise ImportError(
f"Missing requirements in your local environment for `{path_or_repo_id}`:\n" + "\n".join(failed)
)
| transformers/src/transformers/dynamic_module_utils.py/0 | {
"file_path": "transformers/src/transformers/dynamic_module_utils.py",
"repo_id": "transformers",
"token_count": 14260
} | 432 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import warnings
from dataclasses import dataclass
from typing import Any, Optional, Union
import numpy as np
import tensorflow as tf
from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput
from ..models.auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
)
from ..tf_utils import shape_list, stable_softmax
from ..utils import ModelOutput, logging
from .configuration_utils import GenerationConfig
from .tf_logits_process import (
TFForcedBOSTokenLogitsProcessor,
TFForcedEOSTokenLogitsProcessor,
TFForceTokensLogitsProcessor,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
TFNoBadWordsLogitsProcessor,
TFNoRepeatNGramLogitsProcessor,
TFRepetitionPenaltyLogitsProcessor,
TFSuppressTokensAtBeginLogitsProcessor,
TFSuppressTokensLogitsProcessor,
TFTemperatureLogitsWarper,
TFTopKLogitsWarper,
TFTopPLogitsWarper,
)
logger = logging.get_logger(__name__)
@dataclass
class TFGreedySearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using greedy search.
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
attentions: Optional[tuple[tuple[tf.Tensor]]] = None
hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFGreedySearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention
weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the
encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes)
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
encoder_attentions: Optional[tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[tuple[tf.Tensor]] = None
decoder_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
cross_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using sampling.
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
attentions: Optional[tuple[tuple[tf.Tensor]]] = None
hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of
    the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states
    attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size*num_return_sequences,
num_heads, sequence_length, sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
encoder_attentions: Optional[tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[tuple[tf.Tensor]] = None
decoder_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
cross_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam search.
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
attentions: Optional[tuple[tuple[tf.Tensor]]] = None
hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights
    of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the decoder_hidden_states
    attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
            beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length,
sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
encoder_attentions: Optional[tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[tuple[tf.Tensor]] = None
decoder_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
cross_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSampleDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using beam sample.
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
        sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
attentions: Optional[tuple[tuple[tf.Tensor]]] = None
hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFBeamSampleEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention
    weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
        sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Final beam scores of the generated `sequences`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log
softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this
beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token),
with each tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
`(batch_size*num_return_sequences, sequence_length)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size*num_beams, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
sequences_scores: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
beam_indices: Optional[tf.Tensor] = None
encoder_attentions: Optional[tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[tuple[tf.Tensor]] = None
decoder_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
cross_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFContrastiveSearchDecoderOnlyOutput(ModelOutput):
"""
Base class for outputs of decoder-only generation models using contrastive search.
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
attentions: Optional[tuple[tuple[tf.Tensor]]] = None
hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
@dataclass
class TFContrastiveSearchEncoderDecoderOutput(ModelOutput):
"""
Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention
    weights of the decoder (respectively the encoder) can be accessed via the decoder_attentions and the
    decoder_hidden_states attributes (respectively the encoder_attentions and the encoder_hidden_states attributes).
Args:
sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter
if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`):
Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax)
at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each
generated token), with each tensor of shape `(batch_size, config.vocab_size)`.
encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer of the encoder) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of
`tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`.
"""
sequences: Optional[tf.Tensor] = None
scores: Optional[tuple[tf.Tensor]] = None
encoder_attentions: Optional[tuple[tf.Tensor]] = None
encoder_hidden_states: Optional[tuple[tf.Tensor]] = None
decoder_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
cross_attentions: Optional[tuple[tuple[tf.Tensor]]] = None
decoder_hidden_states: Optional[tuple[tuple[tf.Tensor]]] = None
TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput]
TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput]
TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput]
TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput]
TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput]
TFGenerateOutput = Union[
TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput
]
class TFGenerationMixin:
"""
A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`].
The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for:
- *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and
`do_sample=False`
- *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and
`top_k>1`
- *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and
`do_sample=True`
- *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1`
    You do not need to call any of the above methods directly. Pass custom parameter values to `generate` instead. To
    learn more about decoding strategies, refer to the [text generation strategies guide](../generation_strategies).
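    Example (an illustrative sketch only; the checkpoint name is a placeholder and any TF checkpoint with a
    language modeling head works the same way):
    ```python
    >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
    >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
    >>> inputs = tokenizer("The capital of France is", return_tensors="tf")
    >>> # num_beams=1 and do_sample=False (the defaults) -> greedy search
    >>> greedy_ids = model.generate(**inputs, max_new_tokens=5)
    >>> # num_beams=1 and do_sample=True -> multinomial sampling
    >>> sampled_ids = model.generate(**inputs, max_new_tokens=5, do_sample=True)
    >>> # num_beams>1 and do_sample=False -> beam-search decoding
    >>> beam_ids = model.generate(**inputs, max_new_tokens=5, num_beams=4)
    ```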
"""
_seed_generator = None
@property
def seed_generator(self):
warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning)
if self._seed_generator is None:
self._seed_generator = tf.random.Generator.from_non_deterministic_state()
return self._seed_generator
supports_xla_generation = True
def prepare_inputs_for_generation(self, *args, **kwargs):
raise NotImplementedError(
"A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
)
def compute_transition_scores(
self,
sequences: tf.Tensor,
scores: tuple[tf.Tensor],
beam_indices: Optional[tf.Tensor] = None,
normalize_logits: bool = False,
) -> tf.Tensor:
"""
Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was
used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time.
Parameters:
sequences (`tf.Tensor`):
The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or
shorter if all batches finished early due to the `eos_token_id`.
scores (`tuple(tf.Tensor)`):
Transition scores for each vocabulary token at each generation step. Beam transition scores consisting
                of log probabilities of tokens conditioned on log softmax of previously generated tokens. Tuple of
`tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each
tensor of shape `(batch_size*num_beams, config.vocab_size)`.
beam_indices (`tf.Tensor`, *optional*):
Beam indices of generated token id at each generation step. `tf.Tensor` of shape
                `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` was used at
generate-time.
normalize_logits (`bool`, *optional*, defaults to `False`):
Whether to normalize the logits (which, for legacy reasons, may be unnormalized).
Return:
`tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing
the transition scores (logits)
Examples:
```python
>>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM
>>> import numpy as np
>>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2")
>>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> tokenizer.pad_token_id = tokenizer.eos_token_id
>>> inputs = tokenizer(["Today is"], return_tensors="tf")
>>> # Example 1: Print the scores for each token generated with Greedy Search
>>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
>>> transition_scores = model.compute_transition_scores(
... outputs.sequences, outputs.scores, normalize_logits=True
... )
>>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for
>>> # encoder-decoder models, like BART or T5.
>>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1]
>>> generated_tokens = outputs.sequences[:, input_length:]
>>> for tok, score in zip(generated_tokens[0], transition_scores[0]):
... # | token | token string | logits | probability
... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}")
| 262 | the | -1.414 | 24.33%
| 1110 | day | -2.609 | 7.36%
| 618 | when | -2.010 | 13.40%
| 356 | we | -1.859 | 15.58%
| 460 | can | -2.508 | 8.14%
>>> # Example 2: Reconstruct the sequence scores from Beam Search
>>> outputs = model.generate(
... **inputs,
... max_new_tokens=5,
... num_beams=4,
... num_return_sequences=4,
... return_dict_in_generate=True,
... output_scores=True,
... )
>>> transition_scores = model.compute_transition_scores(
... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False
... )
>>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores.
>>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the
>>> # use case, you might want to recompute it with `normalize_logits=True`.
>>> output_length = np.sum(transition_scores.numpy() < 0, axis=1)
>>> length_penalty = model.generation_config.length_penalty
>>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty)
>>> print(np.allclose(outputs.sequences_scores, reconstructed_scores))
True
```"""
# 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent
        # to a beam search approach where the first (and only) beam is always selected
if beam_indices is None:
beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)])
# 2. reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being
# seq_len - input_length
scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0))
scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1]))
# 3. Optionally normalize the logits (across the vocab dimension)
if normalize_logits:
scores = tf.nn.log_softmax(scores, axis=1)
# 4. cut beam_indices to longest beam length
beam_indices_mask = beam_indices < 0
max_beam_length = tf.math.reduce_max(
tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1)
)
beam_indices = beam_indices[:, -max_beam_length:]
beam_indices_mask = beam_indices_mask[:, -max_beam_length:]
# 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards
beam_indices = tf.where(beam_indices_mask, 0, beam_indices)
# 6. Define which indices contributed to scores
cut_idx = sequences.shape[-1] - max_beam_length
token_indices = sequences[:, cut_idx:]
gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape)
indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1)
# 7. Compute scores
transition_scores = tf.gather_nd(scores, indices)
# 8. Mask out transition_scores of beams that stopped early
transition_scores = tf.where(beam_indices_mask, 0, transition_scores)
return transition_scores
def _validate_model_class(self):
"""
Confirms that the model class is compatible with generation. If not, raises an exception that points to the
right class to use.
"""
if not self.can_generate():
generate_compatible_mappings = [
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
]
generate_compatible_classes = set()
for model_mapping in generate_compatible_mappings:
supported_models = model_mapping.get(type(self.config), default=None)
if supported_models is not None:
generate_compatible_classes.add(supported_models.__name__)
exception_message = (
f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
"it doesn't have a language model head."
)
if generate_compatible_classes:
exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
raise TypeError(exception_message)
def _validate_model_kwargs(self, model_kwargs: dict[str, Any]):
"""Validates model kwargs for generation. Generate argument typos will also be caught here."""
# Excludes arguments that are handled before calling any model function
if self.config.is_encoder_decoder:
for key in ["decoder_input_ids"]:
model_kwargs.pop(key, None)
unused_model_args = []
model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
# `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
# `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
if "kwargs" in model_args or "model_kwargs" in model_args:
model_args |= set(inspect.signature(self.call).parameters)
for key, value in model_kwargs.items():
if value is not None and key not in model_args:
unused_model_args.append(key)
if unused_model_args:
raise ValueError(
f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
" generate arguments will also show up in this list)"
)
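    # Illustration for `_validate_model_kwargs` above (hypothetical call, not executed): a typo such as
    # `model.generate(input_ids, attention_masks=mask)` reaches this check as an unused model kwarg, so the
    # ValueError above lists `attention_masks` and surfaces the typo to the user.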
def generate(
self,
inputs: Optional[tf.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[TFLogitsProcessorList] = None,
seed=None,
**kwargs,
) -> Union[TFGenerateOutput, tf.Tensor]:
r"""
Generates sequences of token ids for models with a language modeling head.
<Tip warning={true}>
Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the
model's default generation configuration. You can override any `generation_config` by passing the corresponding
parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`.
For an overview of generation strategies and code examples, check out the [following
guide](../generation_strategies).
</Tip>
Parameters:
inputs (`tf.Tensor` of varying shape depending on the modality, *optional*):
The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`
                should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of
`input_ids`, `input_values`, `input_features`, or `pixel_values`.
generation_config (`~generation.GenerationConfig`, *optional*):
The generation configuration to be used as base parametrization for the generation call. `**kwargs`
passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
default values, whose documentation should be checked to parameterize generation.
logits_processor (`LogitsProcessorList`, *optional*):
Custom logits processors that complement the default logits processors built from arguments and
                generation config. If a logits processor is passed that was already created from the arguments or the
                generation config, an error is thrown. This feature is intended for advanced users.
seed (`list[int]`, *optional*):
Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
`seed` argument from stateless functions in `tf.random`.
kwargs (`dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `call` function of the model. If the model is an encoder-decoder model, encoder
specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
Return:
[`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when
`config.return_dict_in_generate=True`) or a `tf.Tensor`.
If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.TFGreedySearchDecoderOnlyOutput`],
- [`~generation.TFSampleDecoderOnlyOutput`],
- [`~generation.TFBeamSearchDecoderOnlyOutput`],
- [`~generation.TFBeamSampleDecoderOnlyOutput`]
If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible
[`~utils.ModelOutput`] types are:
- [`~generation.TFGreedySearchEncoderDecoderOutput`],
- [`~generation.TFSampleEncoderDecoderOutput`],
- [`~generation.TFBeamSearchEncoderDecoderOutput`],
- [`~generation.TFBeamSampleEncoderDecoderOutput`]
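        Examples:
        A minimal sketch (the checkpoint name is only illustrative). Without `return_dict_in_generate=True` the
        call returns a plain `tf.Tensor` of token ids; with it, one of the output classes listed above:
        ```python
        >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
        >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        >>> inputs = tokenizer("Today is", return_tensors="tf")
        >>> # plain tensor of generated token ids
        >>> sequences = model.generate(**inputs, max_new_tokens=5)
        >>> # structured output (here a `TFGreedySearchDecoderOnlyOutput`), keeping the per-step scores
        >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True)
        >>> text = tokenizer.decode(outputs.sequences[0], skip_special_tokens=True)
        ```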
"""
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
self._validate_model_class()
# priority: `generation_config` argument > `model.generation_config` (the default generation config)
if generation_config is None:
# legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
# two conditions must be met
# 1) the generation config must have been created from the model config (`_from_model_config` field);
# 2) the generation config must have seen no modification since its creation (the hash is the same).
if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
self.generation_config
):
new_generation_config = GenerationConfig.from_model_config(self.config)
if new_generation_config != self.generation_config:
                    warnings.warn(
                        "You have modified the pretrained model configuration to control generation. This is a"
                        " deprecated strategy to control generation and will be removed in a future version."
" Please use and modify the model generation configuration (see"
" https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
)
self.generation_config = new_generation_config
generation_config = self.generation_config
generation_config = copy.deepcopy(generation_config)
model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
self._validate_model_kwargs(model_kwargs.copy())
# 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models)
if inputs is not None:
if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating:
pass
elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating):
pass
else:
inputs = tf.cast(inputs, tf.int32)
if model_kwargs.get("attention_mask") is not None:
model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32)
if "decoder_input_ids" in model_kwargs:
if (
isinstance(model_kwargs["decoder_input_ids"], tf.Tensor)
and model_kwargs["decoder_input_ids"].dtype.is_floating
):
pass
elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype(
model_kwargs["decoder_input_ids"].dtype, np.floating
):
pass
else:
model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32)
# 3. Set generation parameters if not already defined
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
if model_kwargs.get("attention_mask") is None:
logger.warning(
"The attention mask and the pad token id were not set. As a consequence, you may observe "
"unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
)
eos_token_id = generation_config.eos_token_id
if isinstance(eos_token_id, list):
eos_token_id = eos_token_id[0]
generation_config.pad_token_id = eos_token_id
use_xla = not tf.executing_eagerly()
if use_xla and not self.supports_xla_generation:
            raise ValueError(
                "The selected model does not support graph mode or XLA generation (e.g. from within tf.function())"
)
# 4. Define model inputs
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
        # `inputs_tensor` now has to be defined and cannot be None anymore
batch_size = shape_list(inputs_tensor)[0]
# 5. Prepare other model kwargs
model_kwargs["output_attentions"] = generation_config.output_attentions
model_kwargs["output_hidden_states"] = generation_config.output_hidden_states
model_kwargs["use_cache"] = generation_config.use_cache
accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys())
requires_attention_mask = "encoder_outputs" not in model_kwargs
if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask:
model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation(
inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id
)
# decoder-only models should use left-padding for generation
if not self.config.is_encoder_decoder:
if generation_config.pad_token_id is not None and tf.math.reduce_any(
inputs_tensor[:, -1] == generation_config.pad_token_id
):
logger.warning(
"A decoder-only architecture is being used, but right-padding was detected! For correct "
"generation results, please set `padding_side='left'` when initializing the tokenizer."
)
if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs:
            # if the model is an encoder-decoder model, `encoder_outputs` are created and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name
)
# 6. Prepare model inputs which will be used for auto-regressive generation
if self.config.is_encoder_decoder:
input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
batch_size=batch_size,
model_input_name=model_input_name,
model_kwargs=model_kwargs,
decoder_start_token_id=generation_config.decoder_start_token_id,
bos_token_id=generation_config.bos_token_id,
)
else:
input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids")
# 7. Prepare `max_length` depending on other stopping criteria.
input_ids_seq_length = shape_list(input_ids)[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
# 20 is the default max_length of the generation config
            warnings.warn(
                f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
                "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.",
UserWarning,
)
elif generation_config.max_new_tokens is not None:
if not has_default_max_length and generation_config.max_length is not None:
                logger.warning(
                    f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (="
f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
"Please refer to the documentation for more information. "
"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
)
generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
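        # Illustration (hypothetical numbers): with an 8-token prompt and `max_new_tokens=32`, `max_length`
        # becomes 40; if `max_length` was also passed explicitly, `max_new_tokens` still takes precedence and
        # the warning above is emitted.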
# If the input length is a tensor (i.e. dynamic length), skip length checks
if not isinstance(input_ids_seq_length, tf.Tensor):
if (
generation_config.min_length is not None
and generation_config.min_length > generation_config.max_length
):
                raise ValueError(
                    f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger"
f" than the maximum length ({generation_config.max_length})"
)
if input_ids_seq_length >= generation_config.max_length:
input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
                logger.warning(
                    f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
                    f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
                    " increasing `max_new_tokens`."
)
# 8. determine generation mode
is_contrastive_search_gen_mode = (
generation_config.top_k is not None
and generation_config.top_k > 1
and generation_config.do_sample is False
and generation_config.penalty_alpha is not None
and generation_config.penalty_alpha > 0
)
is_greedy_gen_mode = (
not is_contrastive_search_gen_mode
and (generation_config.num_beams == 1)
and generation_config.do_sample is False
)
is_beam_gen_mode = (
not is_contrastive_search_gen_mode
and (generation_config.num_beams > 1)
and generation_config.do_sample is False
)
is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True
is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True
# 9. prepare distribution pre_processing samplers
logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
logits_processor=logits_processor,
)
# 10. go into different generation modes
if is_greedy_gen_mode:
if generation_config.num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
" greedy search."
)
# 11. run greedy search
return self.greedy_search(
input_ids,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
logits_processor=logits_processor,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
**model_kwargs,
)
elif is_contrastive_search_gen_mode:
if generation_config.num_return_sequences > 1:
raise ValueError(
f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
" contrastive search."
)
# 11. run contrastive search
return self.contrastive_search(
input_ids,
top_k=generation_config.top_k,
penalty_alpha=generation_config.penalty_alpha,
logits_processor=logits_processor,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
**model_kwargs,
)
elif is_sample_gen_mode:
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config=generation_config)
# 12. expand input_ids with `num_return_sequences` additional sequences per batch
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_return_sequences,
is_encoder_decoder=self.config.is_encoder_decoder,
**model_kwargs,
)
# 13. run sample
return self.sample(
input_ids,
logits_processor=logits_processor,
logits_warper=logits_warper,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
seed=seed,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
**model_kwargs,
)
elif is_beam_gen_mode:
if generation_config.num_beams < generation_config.num_return_sequences:
raise ValueError(
"Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
f" num_return_sequences, got {generation_config.num_beams} and"
f" {generation_config.num_return_sequences} (respectively)"
)
# 11. broadcast inputs to the desired number of beams
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
expand_in_new_axis=True,
**model_kwargs,
)
# 12. run beam search
return self.beam_search(
input_ids,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
length_penalty=generation_config.length_penalty,
early_stopping=generation_config.early_stopping,
logits_processor=logits_processor,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
num_return_sequences=generation_config.num_return_sequences,
**model_kwargs,
)
elif is_beam_sample_gen_mode:
if generation_config.num_beams < generation_config.num_return_sequences:
raise ValueError(
"Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
f" num_return_sequences, got {generation_config.num_beams} and"
f" {generation_config.num_return_sequences} (respectively)"
)
# 11. prepare logits warper
logits_warper = self._get_logits_warper(generation_config=generation_config)
# 12. broadcast inputs to the desired number of beams
input_ids, model_kwargs = self._expand_inputs_for_generation(
input_ids=input_ids,
expand_size=generation_config.num_beams,
is_encoder_decoder=self.config.is_encoder_decoder,
expand_in_new_axis=True,
**model_kwargs,
)
# 13. run beam sample (beam search with sampling)
return self.beam_search(
input_ids,
do_sample=True,
max_length=generation_config.max_length,
pad_token_id=generation_config.pad_token_id,
eos_token_id=generation_config.eos_token_id,
length_penalty=generation_config.length_penalty,
early_stopping=generation_config.early_stopping,
logits_processor=logits_processor,
logits_warper=logits_warper,
output_scores=generation_config.output_scores,
return_dict_in_generate=generation_config.return_dict_in_generate,
num_return_sequences=generation_config.num_return_sequences,
**model_kwargs,
)
def _prepare_attention_mask_for_generation(
self,
inputs: tf.Tensor,
pad_token_id: Optional[int],
eos_token_id: Optional[int],
) -> tf.Tensor:
is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64)
is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id)
is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id)
# Check if input is input_ids and padded -> only then is attention_mask defined
if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id:
return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32)
else:
return tf.ones(inputs.shape[:2], dtype=tf.int32)
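    # Illustration for `_prepare_attention_mask_for_generation` above (not executed): for
    # `inputs = [[11, 12, 0]]` with `pad_token_id=0` and a different `eos_token_id`, the returned mask is
    # `[[1, 1, 0]]`; when the inputs contain no pad token (or pad and eos coincide), an all-ones mask of
    # shape `inputs.shape[:2]` is returned instead.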
def _prepare_encoder_decoder_kwargs_for_generation(
self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None
) -> dict[str, Any]:
# 1. get encoder and store encoder outputs
encoder = self.get_encoder()
# 2. prepare encoder args and encoder kwargs from model kwargs
irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"]
encoder_kwargs = {
argument: value
for argument, value in model_kwargs.items()
if not any(argument.startswith(p) for p in irrelevant_prefix)
}
encoder_signature = set(inspect.signature(encoder.call).parameters)
encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature
if not encoder_accepts_wildcard:
encoder_kwargs = {
argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature
}
# 3. vision models don't use `attention_mask`.
encoder_kwargs["return_dict"] = True
encoder_kwargs[model_input_name] = inputs_tensor
if model_input_name != self.main_input_name: # in Keras, the first input must always be passed
encoder_kwargs[self.main_input_name] = None
encoder_outputs = encoder(**encoder_kwargs)
model_kwargs["encoder_outputs"] = encoder_outputs
return model_kwargs
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
model_input_name: str,
model_kwargs: dict[str, tf.Tensor],
decoder_start_token_id: Optional[int] = None,
bos_token_id: Optional[int] = None,
) -> tuple[tf.Tensor, dict[str, tf.Tensor]]:
"""Prepares `decoder_input_ids` for generation with encoder-decoder models"""
        # 1. Check whether the user has defined `decoder_input_ids` manually. For convenience in terms of input naming,
# we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input.
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
decoder_input_ids = model_kwargs.pop("decoder_input_ids")
elif "input_ids" in model_kwargs and model_input_name != "input_ids":
decoder_input_ids = model_kwargs.pop("input_ids")
else:
decoder_input_ids = None
# 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that.
decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id
# no user input -> use decoder_start_token_id as decoder_input_ids
if decoder_input_ids is None:
decoder_input_ids = decoder_input_ids_start
# user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
# decoder_attention_mask if provided)
elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id):
decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1)
if "decoder_attention_mask" in model_kwargs:
decoder_attention_mask = model_kwargs["decoder_attention_mask"]
decoder_attention_mask = tf.concat(
(tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask),
axis=-1,
)
model_kwargs["decoder_attention_mask"] = decoder_attention_mask
return decoder_input_ids, model_kwargs
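    # Illustration for `_prepare_decoder_input_ids_for_generation` above (not executed): with
    # `decoder_start_token_id=2` and no user-provided ids, the decoder starts from a `(batch_size, 1)` tensor
    # of 2s; if the user passed `decoder_input_ids=[[5, 6]]`, it becomes `[[2, 5, 6]]` and any provided
    # `decoder_attention_mask` is extended on the left with a column of ones to stay aligned.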
def _get_decoder_start_token_id(
self, decoder_start_token_id: Optional[int] = None, bos_token_id: Optional[int] = None
) -> int:
# retrieve decoder_start_token_id for encoder-decoder models
# fall back to bos_token_id if necessary
decoder_start_token_id = (
decoder_start_token_id
if decoder_start_token_id is not None
else self.generation_config.decoder_start_token_id
)
bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
if decoder_start_token_id is not None:
return decoder_start_token_id
elif bos_token_id is not None:
return bos_token_id
raise ValueError(
"`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
)
@staticmethod
def _expand_inputs_for_generation(
expand_size: int = 1,
is_encoder_decoder: bool = False,
input_ids: Optional[tf.Tensor] = None,
expand_in_new_axis: bool = False,
**model_kwargs,
) -> tuple[tf.Tensor, dict[str, Any]]:
"""
Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...],
depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with
`expand_in_new_axis=True`
"""
def _expand_tensor(tensor: tf.Tensor):
if expand_in_new_axis:
shape = shape_list(tensor)
return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:]))
else:
return tf.repeat(tensor, expand_size, axis=0)
def _expand_dict_for_generation(dict_to_expand):
for key in dict_to_expand:
if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor):
dict_to_expand[key] = _expand_tensor(dict_to_expand[key])
return dict_to_expand
if input_ids is not None:
input_ids = _expand_tensor(input_ids)
model_kwargs = _expand_dict_for_generation(model_kwargs)
if is_encoder_decoder:
if model_kwargs.get("encoder_outputs") is None:
raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.")
model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"])
return input_ids, model_kwargs
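    # Illustration for `_expand_inputs_for_generation` above (not executed): with `expand_size=3`, an
    # `input_ids` tensor of shape `(2, 7)` becomes `(6, 7)` when `expand_in_new_axis=False` (repeated along the
    # batch axis, as in sampling) and `(2, 3, 7)` when `expand_in_new_axis=True` (a new beam axis, as in beam
    # search). Tensors in `model_kwargs`, including `encoder_outputs`, are expanded in the same way.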
def _prepare_model_inputs(
self,
inputs: Optional[tf.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[dict[str, tf.Tensor]] = None,
) -> tuple[tf.Tensor, Optional[str], dict[str, tf.Tensor]]:
"""
This function extracts the model-specific `inputs` for generation.
"""
# 1. retrieve all kwargs that are non-None or non-model input related.
# some encoder-decoder models have different names for model and encoder
if (
self.config.is_encoder_decoder
and hasattr(self, "encoder")
and hasattr(self.encoder, "main_input_name")
and self.encoder.main_input_name != self.main_input_name
):
input_name = self.encoder.main_input_name
else:
input_name = self.main_input_name
model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name}
# 2. check whether model_input_name is passed as kwarg
# if yes and `inputs` is None use kwarg inputs
inputs_kwarg = model_kwargs.pop(input_name, None)
if inputs_kwarg is not None and inputs is not None:
            raise ValueError(
                f"`inputs`: {inputs} were passed alongside {input_name}, which is not allowed. "
f"Make sure to either pass {inputs} or {input_name}=..."
)
elif inputs_kwarg is not None:
inputs = inputs_kwarg
# 3. In the presence of `inputs_embeds` for text models:
# - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model
# doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with
# input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`)
# - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and
# pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states.
if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
if not self.config.is_encoder_decoder:
has_inputs_embeds_forwarding = "inputs_embeds" in set(
inspect.signature(self.prepare_inputs_for_generation).parameters.keys()
)
if not has_inputs_embeds_forwarding:
raise ValueError(
f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} "
"doesn't have its forwarding implemented. See the GPT2 implementation for an example "
"(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!"
)
# In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of
# the attention mask) can rely on the actual model input.
model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
inputs, bos_token_id, model_kwargs=model_kwargs
)
else:
if inputs is not None:
raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.")
inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
# 4. if `inputs` is still None, try to create `input_ids` from BOS token
inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
return inputs, input_name, model_kwargs
def _maybe_initialize_input_ids_for_generation(
self,
inputs: Optional[tf.Tensor] = None,
bos_token_id: Optional[int] = None,
model_kwargs: Optional[dict[str, tf.Tensor]] = None,
) -> tf.Tensor:
"""Initializes input ids for generation, if necessary."""
if inputs is not None:
return inputs
encoder_outputs = model_kwargs.get("encoder_outputs")
if self.config.is_encoder_decoder and encoder_outputs is not None:
# make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding
shape = encoder_outputs.last_hidden_state.shape[:-1]
return tf.ones(shape, dtype=tf.int32) * -100
if bos_token_id is None:
raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.")
# If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with
# soft-prompting or in multimodal implementations built on top of decoder-only language models.
batch_size = 1
for value in model_kwargs.values():
if isinstance(value, tf.Tensor):
batch_size = value.shape[0]
break
return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id
@staticmethod
def _extract_past_from_model_output(outputs: ModelOutput):
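# Different model families expose their cache under different output names: `past_key_values` for most
# models, `mems` for Transformer-XL/XLNet-style models, and `past_buckets_states` for Reformer.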
past_key_values = None
if "past_key_values" in outputs:
past_key_values = outputs.past_key_values
elif "mems" in outputs:
past_key_values = outputs.mems
elif "past_buckets_states" in outputs:
past_key_values = outputs.past_buckets_states
return past_key_values
def _update_model_kwargs_for_generation(
self, outputs: ModelOutput, model_kwargs: dict[str, Any], is_encoder_decoder: bool = False
) -> dict[str, Any]:
# update past_key_values
model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs)
# update attention mask
if not is_encoder_decoder:
if "attention_mask" in model_kwargs:
attention_mask = model_kwargs["attention_mask"]
model_kwargs["attention_mask"] = tf.concat(
[attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1
)
return model_kwargs
def _update_model_kwargs_for_xla_generation(
self,
model_outputs: ModelOutput,
model_kwargs: dict[str, Any],
cur_len: int,
max_length: int,
batch_size: int,
is_encoder_decoder: bool = False,
batch_axis: int = 0,
):
def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder):
"""initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
if is_encoder_decoder:
# One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor,
# 1s for the actual input_ids
decoder_attention_mask = tf.concat(
[
tf.ones((batch_size, 1), dtype=tf.int32),
tf.zeros((batch_size, num_padding_values), dtype=tf.int32),
tf.ones((batch_size, 1), dtype=tf.int32),
],
axis=1,
)
mask = {"decoder_attention_mask": decoder_attention_mask}
else:
attention_mask = model_kwargs.pop("attention_mask")
# 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids
attention_mask = tf.concat(
[
attention_mask,
tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype),
tf.ones((batch_size, 1), dtype=attention_mask.dtype),
],
axis=1,
)
mask = {"attention_mask": attention_mask}
return mask
def _update_attention(model_kwargs, new_past_index, is_encoder_decoder):
"""updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`"""
update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index
if is_encoder_decoder:
decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype)
decoder_attention_mask = dynamic_update_slice(
decoder_attention_mask, decoder_attention_mask_update_slice, update_start
)
mask = {"decoder_attention_mask": decoder_attention_mask}
else:
attention_mask = model_kwargs.pop("attention_mask")
attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype)
attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start)
mask = {"attention_mask": attention_mask}
return mask
def _initialize_past(past_key_values, num_padding_values, batch_axis):
"""initialize past_key_values with zeros -- the structure depends on `batch_axis`"""
if batch_axis == 0:
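# right-pad each cached key/value tensor along its sequence axis (axis 2) with `num_padding_values`
# empty positions, so the cache already has its final, XLA-friendly static length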
padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32)
new_past = ()
for past_layer in past_key_values:
new_past_layer = list(past_layer)
for i in range(len(new_past_layer[:2])):
new_past_layer[i] = tf.pad(past_layer[i], padding_values)
new_past += (tuple(new_past_layer),)
else:
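# for caches whose batch dimension is not axis 0 (5D tensors), build the equivalent paddings matrix:
# zeros everywhere except position [3, 1], i.e. right-pad axis 3 with `num_padding_values`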
padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2))
new_past = list(past_key_values)
for i in range(len(past_key_values)):
new_past[i] = tf.pad(past_key_values[i], padding_values)
return new_past
def _update_past(past_key_values, new_past_index, batch_axis):
if batch_axis == 0:
slice_start_base = tf.constant([0, 0, 1, 0])
new_past = ()
for past_layer in past_key_values:
new_past_layer = list(past_layer)
for i in range(len(new_past_layer[:2])):
update_slice = past_layer[i][:, :, -1:]
# Write the last slice to the first open location in the padded past_key_values array
# and then truncate the last slice off the array
new_past_layer[i] = dynamic_update_slice(
past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index
)
new_past += (tuple(new_past_layer),)
else:
slice_start_base = tf.constant([0, 0, 0, 1, 0])
new_past = [None for _ in range(len(past_key_values))]
for i in range(len(past_key_values)):
update_slice = past_key_values[i][:, :, :, -1:]
# Write the last slice to the first open location in the padded past_key_values array
# and then truncate the last slice off the array
new_past[i] = dynamic_update_slice(
past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index
)
return new_past
past_key_values = self._extract_past_from_model_output(model_outputs)
if past_key_values is None:
raise ValueError(
"No known `past_key_values variable` found in model outputs (model outputs keys:"
f" {list(model_outputs.keys())})"
)
is_past_initialized = model_kwargs.pop("past_key_values", None) is not None
if not is_past_initialized:
# The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds information relative to
# previous autoregressive generation steps (step 0 has no past_key_values, step 1 has 1 past_key_values value, ..., the last step
# has `max_length - 1` past_key_values values).
num_padding_values = max_length - cur_len - 1
mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder)
new_past = _initialize_past(past_key_values, num_padding_values, batch_axis)
else:
# The new index of past_key_values to be filled corresponds to the current length of the sequence, with two
# subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above)
# and -1 again because in an array the index is the length of the array minus 1.
new_past_index = cur_len - 2
mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder)
new_past = _update_past(past_key_values, new_past_index, batch_axis)
# sets the updated variables (mask and past_key_values)
model_kwargs.update(mask)
model_kwargs["past_key_values"] = tuple(new_past)
return model_kwargs
def _get_logits_warper(
self,
generation_config: GenerationConfig,
) -> TFLogitsProcessorList:
"""
Returns a [`TFLogitsProcessorList`] that contains all relevant [`TFLogitsWarper`]
instances used for multinomial sampling.
"""
# instantiate warpers list
warpers = TFLogitsProcessorList()
# In beam methods, we need to keep at least one non-eos token to explore continuations that might have a
# better score (i.e. keep len(generation_config.eos_token_id) + 1)
if generation_config.num_beams > 1:
if isinstance(generation_config.eos_token_id, list):
min_tokens_to_keep = len(generation_config.eos_token_id) + 1
else:
min_tokens_to_keep = 2
else:
min_tokens_to_keep = 1
if generation_config.temperature is not None and generation_config.temperature != 1.0:
warpers.append(TFTemperatureLogitsWarper(generation_config.temperature))
if generation_config.top_k is not None and generation_config.top_k != 0:
warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep))
if generation_config.top_p is not None and generation_config.top_p < 1.0:
warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep))
return warpers
def _get_logits_processor(
self,
generation_config: GenerationConfig,
input_ids_seq_length: int,
logits_processor: Optional[TFLogitsProcessorList],
) -> TFLogitsProcessorList:
"""
Returns a [`TFLogitsProcessorList`] that contains all relevant [`TFLogitsProcessor`]
instances used to modify the scores of the language model head.
"""
processors = TFLogitsProcessorList()
# instantiate processors list
if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0:
processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty))
if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
if generation_config.bad_words_ids is not None:
processors.append(
TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id)
)
if (
generation_config.min_length is not None
and generation_config.eos_token_id is not None
and generation_config.min_length > 0
):
processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id))
if generation_config.forced_bos_token_id is not None:
processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
if generation_config.forced_eos_token_id is not None:
processors.append(
TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
)
if generation_config.suppress_tokens is not None:
processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
if generation_config.begin_suppress_tokens is not None:
begin_index = input_ids_seq_length
begin_index = (
begin_index
if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
else begin_index + 1
)
if getattr(generation_config, "forced_decoder_ids", None) is not None:
begin_index += generation_config.forced_decoder_ids[-1][0]  # generation starts after the last token that is forced
processors.append(
TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
)
if getattr(generation_config, "forced_decoder_ids", None) is not None:
processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids))
processors = self._merge_criteria_processor_list(processors, logits_processor)
return processors
def _merge_criteria_processor_list(
self,
default_list: TFLogitsProcessorList,
custom_list: TFLogitsProcessorList,
) -> TFLogitsProcessorList:
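# User-provided processors must not duplicate the ones `generate` builds from its own arguments; a duplicate
# raises, otherwise the custom processors are appended after (and therefore run after) the default ones.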
if len(custom_list) == 0:
return default_list
for default in default_list:
for custom in custom_list:
if type(custom) is type(default):
object_type = "logits processor"
raise ValueError(
f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
f" `generate`, but it has already been created with the values {default}. {default} has been"
" created by passing the corresponding arguments to generate or by the model's config default"
f" values. If you just want to change the default values of {object_type} consider passing"
f" them as arguments to `generate` instead of using a custom {object_type}."
)
default_list.extend(custom_list)
return default_list
def greedy_search(
self,
input_ids: tf.Tensor,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
logits_processor: Optional[TFLogitsProcessorList] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFGreedySearchOutput, tf.Tensor]:
r"""
Generates sequences for models with a language modeling head using greedy decoding.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, list[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or
`tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
[`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... TFAutoModelForCausalLM,
... TFLogitsProcessorList,
... TFMinLengthLogitsProcessor,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
>>> # instantiate logits processors
>>> logits_processor = TFLogitsProcessorList(
... [
... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
>>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"]
```"""
# 1. init greedy_search values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
use_xla = not tf.executing_eagerly()
# TODO (Joao): fix cache format or find programmatic way to detect cache index
# GPT2 and other models have a slightly different cache structure, with a different batch axis
model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# some models, like XLNet, need more than the last token in the presence of past_key_values
needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
# 2. init `attentions`, `hidden_states`, and `scores` tuples
scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
# 3. init tensors to use for "xla-compileable" generate function
batch_size, cur_len = shape_list(input_ids)
# initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
generated = tf.concat([input_ids, input_ids_padding], axis=-1)
finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
# 4. define "xla-compile-able" stop-condition and auto-regressive function
# define condition fn
def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
"""state termination condition fn."""
return ~tf.reduce_all(finished_sequences)
# define body fn
def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs):
"""state update fn."""
if model_kwargs.get("past_key_values") is None or needs_full_input:
input_ids = generated[:, :cur_len]
else:
input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
# forward pass to get next token logits
model_outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = model_outputs.logits[:, -1]
# pre-process distribution
next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
scores.append(next_tokens_scores)
if output_attentions and self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.decoder_attentions)
elif output_attentions and not self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.attentions)
if self.config.is_encoder_decoder:
cross_attentions.append(model_outputs.cross_attentions)
if output_hidden_states and self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.decoder_hidden_states)
elif output_hidden_states and not self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.hidden_states)
# argmax
next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32)
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
next_token_is_eos = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
),
axis=0,
)
finished_sequences = finished_sequences | next_token_is_eos
# update `generated` and `cur_len`
update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
cur_len += 1
# update model_kwargs
if use_xla:
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=model_outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=batch_size,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# if we don't cache past_key_values key values we need the whole input
if model_kwargs.get("past_key_values", None) is None:
# let's throw out `past_key_values` since we don't want `None` tensors
model_kwargs.pop("past_key_values", None)
return generated, finished_sequences, cur_len, model_kwargs
# 5. run generation
# 1st generation step has to be run before to initialize `past_key_values`
generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn(
generated, finished_sequences, cur_len, model_kwargs
)
# 2-to-n generation steps can then be run in autoregressive fashion
# only in case 1st generation step does NOT yield EOS token though
maximum_iterations = max_length - cur_len
generated, _, cur_len, _ = tf.while_loop(
greedy_search_cond_fn,
greedy_search_body_fn,
(generated, finished_sequences, cur_len, model_kwargs),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
if not use_xla:
# cut for backward compatibility
generated = generated[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights
# and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
scores = tuple(scores) if scores is not None else None
decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
return TFGreedySearchEncoderDecoderOutput(
sequences=generated,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return TFGreedySearchDecoderOnlyOutput(
sequences=generated,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return generated
def sample(
self,
input_ids: tf.Tensor,
logits_processor: Optional[TFLogitsProcessorList] = None,
logits_warper: Optional[TFLogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
seed: Optional[tuple[int, int]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFSampleOutput, tf.Tensor]:
r"""
Generates sequences for models with a language modeling head using multinomial sampling.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
logits_processor (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
used to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, list[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
seed (`tuple[int, int]`, *optional*):
Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the
`seed` argument from stateless functions in `tf.random`.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A
`tf.Tensor` containing the generated tokens (default behaviour) or a
[`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> import tensorflow as tf
>>> from transformers import (
... AutoTokenizer,
... TFAutoModelForCausalLM,
... TFLogitsProcessorList,
... TFMinLengthLogitsProcessor,
... TFTopKLogitsWarper,
... TFTemperatureLogitsWarper,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
>>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
>>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
>>> model.generation_config.pad_token_id = model.generation_config.eos_token_id
>>> input_prompt = "Today is a beautiful day, and"
>>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids
>>> # instantiate logits processors
>>> logits_processor = TFLogitsProcessorList(
... [
... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
... ]
... )
>>> # instantiate logits warpers
>>> logits_warper = TFLogitsProcessorList(
... [
... TFTopKLogitsWarper(50),
... TFTemperatureLogitsWarper(0.7),
... ]
... )
>>> tf.random.set_seed(0)
>>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Today is a beautiful day, and I love my country. But when I look at Donald Trump,']
```"""
# 1. init sample values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
use_xla = not tf.executing_eagerly()
# TODO (Joao): fix cache format or find programmatic way to detect cache index
# GPT2 and other models have a slightly different cache structure, with a different batch axis
model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# some models, like XLNet, need more than the last token in the presence of past_key_values
needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
# 2. init `attentions`, `hidden_states`, and `scores` tuples
scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
# 3. init tensors to use for "xla-compileable" generate function
batch_size, cur_len = shape_list(input_ids)
# initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences`
input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
generated = tf.concat([input_ids, input_ids_padding], axis=-1)
finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
# 4. define "xla-compile-able" stop-condition and auto-regressive function
def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs):
return ~tf.reduce_all(finished_sequences)
def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs):
if model_kwargs.get("past_key_values") is None or needs_full_input:
input_ids = generated[:, :cur_len]
else:
input_ids = tf.expand_dims(generated[:, cur_len - 1], -1)
model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs)
# forward pass to get next token logits
model_outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
next_token_logits = model_outputs.logits[:, -1]
# pre-process distribution
next_tokens_scores = logits_processor(generated, next_token_logits, cur_len)
next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len)
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
scores.append(next_tokens_scores)
if output_attentions and self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.decoder_attentions)
elif output_attentions and not self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.attentions)
if self.config.is_encoder_decoder:
cross_attentions.append(model_outputs.cross_attentions)
if output_hidden_states and self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.decoder_hidden_states)
elif output_hidden_states and not self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.hidden_states)
# sample
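# `tf.random.stateless_categorical` requires an explicit `(2,)`-shaped integer seed; if the caller did not
# provide one, draw a fresh random seed for this step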
if seed is not None:
sample_seed = seed
else:
sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32)
next_tokens = tf.squeeze(
tf.random.stateless_categorical(
logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32
),
axis=1,
)
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
next_token_is_eos = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
),
axis=0,
)
finished_sequences = finished_sequences | next_token_is_eos
# update `generated` and `cur_len`
update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
cur_len += 1
# update model_kwargs
if use_xla:
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=model_outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=batch_size,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# if we don't cache past_key_values key values we need the whole input
if model_kwargs.get("past_key_values", None) is None:
# let's throw out `past_key_values` since we don't want `None` tensors
model_kwargs.pop("past_key_values", None)
return generated, finished_sequences, cur_len, model_kwargs
# 5. run generation
# 1st generation step has to be run before to initialize `past_key_values`
generated, finished_sequences, cur_len, model_kwargs = sample_body_fn(
generated, finished_sequences, cur_len, model_kwargs
)
# 2-to-n generation steps can then be run in autoregressive fashion
# only in case 1st generation step does NOT yield EOS token though
maximum_iterations = max_length - cur_len
generated, _, cur_len, _ = tf.while_loop(
sample_cond_fn,
sample_body_fn,
(generated, finished_sequences, cur_len, model_kwargs),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
if not use_xla:
# cut for backward compatibility
generated = generated[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights
# and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
scores = tuple(scores) if scores is not None else None
decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
return TFSampleEncoderDecoderOutput(
sequences=generated,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return TFSampleDecoderOnlyOutput(
sequences=generated,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return generated
@staticmethod
def _gather_beams(nested, beam_indices, batch_axis=0):
"""Gathers the beam slices indexed by beam_indices into new beam array."""
def gather_fn(tensor):
if batch_axis > 0:
# pushes all dimensions before the batch to the end, so we get (batch, beam_id, ...)
perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
tensor = tf.transpose(tensor, perm=perm)
gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)
if batch_axis > 0:
# transposes back to the original dimensions
perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0)
perm = tf.math.invert_permutation(perm)
gathered_tensor = tf.transpose(gathered_tensor, perm=perm)
return gathered_tensor
return tf.nest.map_structure(gather_fn, nested)
def beam_search(
self,
input_ids: tf.Tensor,
do_sample: bool = False,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
length_penalty: Optional[float] = None,
early_stopping: Optional[Union[bool, str]] = None,
logits_processor: Optional[TFLogitsProcessorList] = None,
logits_warper: Optional[TFLogitsProcessorList] = None,
num_return_sequences: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
r"""
Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses
a greedy approach, otherwise does multinomial sampling without replacement.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, num_beams, sequence_length)`):
The sequence used as a prompt for the generation.
do_sample (`bool`, *optional*, defaults to `False`):
Whether or not to use sampling; use greedy decoding otherwise.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, list[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
length_penalty (`float`, *optional*, defaults to 1.0):
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent
to the sequence length, which in turn is used to divide the score of the sequence. Since the score is
the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences,
while `length_penalty` < 0.0 encourages shorter sequences.
early_stopping (`bool` or `str`, *optional*, defaults to `False`):
Controls the stopping condition for beam-based methods, like beam-search. It accepts the following
values: `True`, where the generation stops as soon as there are `num_beams` complete candidates;
`False`, where a heuristic is applied and the generation stops when it is very unlikely to find better
candidates; `"never"`, where the beam search procedure only stops when there cannot be better
candidates (canonical beam search algorithm).
logits_processor (`[TFLogitsProcessorList]`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
used to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
num_return_sequences (`int`, *optional*, defaults to 1):
The number of independently computed returned sequences for each element in the batch.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
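output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.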
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an
encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or
`tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a
[`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and
`return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if
`model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import (
... AutoTokenizer,
... TFAutoModelForSeq2SeqLM,
... TFLogitsProcessorList,
... TFMinLengthLogitsProcessor,
... )
>>> import tensorflow as tf
>>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")
>>> encoder_input_str = "translate English to German: How old are you?"
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids
>>> # let's run beam search using 3 beams
>>> num_beams = 3
>>> # define decoder start token ids
>>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32)
>>> input_ids = input_ids * model.generation_config.decoder_start_token_id
>>> # add encoder_outputs to model keyword arguments
>>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True)
>>> encoder_outputs.last_hidden_state = tf.repeat(
... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1
... )
>>> model_kwargs = {"encoder_outputs": encoder_outputs}
>>> # instantiate logits processors
>>> logits_processor = TFLogitsProcessorList(
... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)]
... )
>>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['Wie alt bist du?']
```"""
def flatten_beam_dim(tensor, batch_axis=0):
"""Flattens the first two dimensions of a non-scalar array."""
shape = shape_list(tensor)
return tf.reshape(
tensor,
shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :],
)
def unflatten_beam_dim(tensor, num_beams, batch_axis=0):
"""Unflattens the first, flat batch*beam dimension of a non-scalar array."""
shape = shape_list(tensor)
return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :])
# 1. init beam_search values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
num_return_sequences = (
num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
)
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache)
use_xla = not tf.executing_eagerly()
# TODO (Joao): fix cache format or find programmatic way to detect cache index
# GPT2 and other models have a slightly different cache structure, with a different batch axis
model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# some models, like XLNet, need more than the last token in the presence of past_key_values
needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys())
# 2. init `attentions`, `hidden_states`, and `scores` tuples
all_scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
# 3. init tensors to use for "xla-compileable" generate function
batch_size, num_beams, cur_len = shape_list(input_ids)
# store the prompt length of decoder
decoder_prompt_len = cur_len
# per batch, beam-item holding current token in loop, pre-populated with `pad_token_id`
input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * (
pad_token_id or 0
)
running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1)
sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0)
# per batch, beam-item state bit indicating if sentence has finished.
is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool)
# per batch, beam-item score, logprobs
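# only the first beam of each batch item starts at score 0.0; the remaining beams start at -1e9, so the
# first decoding step effectively expands a single beam per batch item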
running_scores = tf.tile(
tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1]
)
scores = tf.ones((batch_size, num_beams)) * -1.0e9
# per batch beam indices
running_beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1
beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1
# flatten beam dim
if "encoder_outputs" in model_kwargs:
model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
model_kwargs["encoder_outputs"]["last_hidden_state"]
)
if "attention_mask" in model_kwargs:
model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"])
# 4. define "xla-compile-able" stop-condition and auto-regressive function
# define stop-condition and auto-regressive function
def beam_search_cond_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
decoder_prompt_len,
model_kwargs,
):
"""
Beam Search termination condition function -- halts the generation loop if any of these conditions becomes
False
"""
# 1. is less than max length?
not_max_length_yet = cur_len < max_length
# 2. can the new beams still improve?
# early_stopping == False -> apply heuristic = always get the best score from `cur_len - decoder_prompt_len`. See the discussion
# below for more details.
# https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
# early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
# length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
if early_stopping == "never" and length_penalty > 0.0:
best_running_score = running_scores[:, :1] / ((max_length - decoder_prompt_len) ** length_penalty)
else:
best_running_score = running_scores[:, :1] / (
tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty
)
worst_finished_score = tf.where(
is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9
)
improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score)
# 3. is there still a beam that has not finished?
still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True))
return not_max_length_yet & still_open_beam & improvement_still_possible
def beam_search_body_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
decoder_prompt_len,
model_kwargs,
):
"""
Beam Search iterative update function -- each iteration adds a new token and updates the best sequences
seen so far
"""
# 1. Forward current tokens
if model_kwargs.get("past_key_values") is None or needs_full_input:
input_ids = running_sequences[:, :, :cur_len]
else:
input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1)
model_inputs = self.prepare_inputs_for_generation(
flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs
)
model_outputs = self(
**model_inputs,
return_dict=True,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams)
# 2. Compute log probs
# get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and
# add new logprobs to existing running logprobs scores.
log_probs = tf.nn.log_softmax(logits)
log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
log_probs = unflatten_beam_dim(log_probs, num_beams)
if do_sample:
log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len)
log_probs = unflatten_beam_dim(log_probs, num_beams)
log_probs_processed = log_probs
log_probs = log_probs + tf.expand_dims(running_scores, axis=2)
vocab_size = log_probs.shape[2]
log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size))
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
all_scores.append(
logits_warper(
flatten_beam_dim(running_sequences),
flatten_beam_dim(log_probs_processed),
cur_len,
)
)
if output_attentions and self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.decoder_attentions)
elif output_attentions and not self.config.is_encoder_decoder:
decoder_attentions.append(model_outputs.attentions)
if self.config.is_encoder_decoder:
cross_attentions.append(model_outputs.cross_attentions)
if output_hidden_states and self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.decoder_hidden_states)
elif output_hidden_states and not self.config.is_encoder_decoder:
decoder_hidden_states.append(model_outputs.hidden_states)
# 3. Retrieve top-K
# Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k
# candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the
# best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live
# beam search.
# Gather the top 2*K scores from _all_ beams.
# Gather 2*k top beams.
# Recover the beam index by floor division.
# Recover token id by modulo division and expand Id array for broadcasting.
# Update sequences for the 2*K top-k new sequences.
beams_to_keep = 2 * num_beams
if do_sample:
topk_indices = sample_without_replacement(log_probs, beams_to_keep)
topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1)
else:
topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep)
topk_current_beam_indices = topk_indices // vocab_size
topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices)
topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices)
topk_ids = topk_indices % vocab_size
# writes the new token
indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep])
indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size])
update_indices = tf.stack(
[indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1
)
topk_sequences = tf.tensor_scatter_nd_update(
tensor=topk_running_sequences,
indices=update_indices,
updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]),
)
# we want to store the beam indices with batch information -> real beam index = beam index % num beams
batch_modified_indices = topk_current_beam_indices + tf.broadcast_to(
tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape
)
update_indices = tf.stack(
[
indices_batch,
indices_beam,
tf.broadcast_to(cur_len - decoder_prompt_len, [batch_size * beams_to_keep]),
],
axis=-1,
)
topk_beam_indices = tf.tensor_scatter_nd_update(
tensor=topk_running_beam_indices,
indices=update_indices,
updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]),
)
# 4. Check which sequences have ended
# Update current sequences: did the top `num_beams` sequences reach an end marker?
# To prevent just-finished sequences from being kept in the set of active (running) beam-search sequences,
# set their log probs to a very large negative value.
if eos_token_id is None:
eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool)
else:
eos_in_next_token = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(
topk_sequences[:, :, cur_len],
[len(eos_token_id)] + topk_sequences[:, :, cur_len].shape,
),
tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1),
),
axis=0,
)
did_topk_just_finished = eos_in_next_token & tf.broadcast_to(
tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0),
shape_list(eos_in_next_token),
)
# EOS tokens outside the top `num_beams` candidates cannot finish a beam, and any candidate that produced
# an EOS must not be continued, so all EOS candidates are masked out of the running scores
running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9
# 5. Get running sequences scores for next
# Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams
# (from top 2*k beams).
next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1]
next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams(
[topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices
)
# 6. Process topk logits
# Further process log probs:
# - add length penalty
# - make sure no scores can be added anymore if beam is full
# - make sure still running sequences cannot be chosen as finalized beam
topk_log_probs = topk_log_probs / (
tf.cast(cur_len + 1 - decoder_prompt_len, dtype=tf.float32) ** length_penalty
)
beams_in_batch_are_full = tf.broadcast_to(
tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished)
) & (early_stopping is True)
add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9
# 7. Get scores, sequences, is sentence finished for next.
# Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores
# to existing finished scores and select the best from the new set of beams
merged_sequences = tf.concat([sequences, topk_sequences], axis=1)
merged_scores = tf.concat([scores, topk_log_probs], axis=1)
merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1)
merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1)
topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1]
next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams(
[merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices
)
# 8. Prepare data for the next iteration
# Determine the top k beam indices from the original set of all beams. With these, gather the top k
# beam-associated caches.
cur_len = cur_len + 1
if "past_key_values" in model_outputs:
cache = tf.nest.map_structure(
lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis),
model_outputs.past_key_values,
)
next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices)
next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis)
model_outputs["past_key_values"] = tf.nest.map_structure(
lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache
)
if use_xla:
next_model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=model_outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=(batch_size * num_beams),
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
next_model_kwargs = self._update_model_kwargs_for_generation(
model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
                # if we don't cache past key values we need the whole input
if model_kwargs.get("past_key_values", None) is None:
# let's throw out `past_key_values` since we don't want `None` tensors
model_kwargs.pop("past_key_values", None)
return (
cur_len,
next_running_sequences,
next_running_scores,
next_running_beam_indices,
next_sequences,
next_scores,
next_beam_indices,
next_is_sent_finished,
decoder_prompt_len,
next_model_kwargs,
)
# 5. run generation
# 1st generation step has to be run before to initialize `past_key_values` (if active)
(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
decoder_prompt_len,
model_kwargs,
) = beam_search_body_fn(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
decoder_prompt_len,
model_kwargs,
)
# 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does
# NOT yield EOS token though)
maximum_iterations = max_length - cur_len
(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
decoder_prompt_len,
_,
) = tf.while_loop(
beam_search_cond_fn,
beam_search_body_fn,
(
cur_len,
running_sequences,
running_scores,
running_beam_indices,
sequences,
scores,
beam_indices,
is_sent_finished,
decoder_prompt_len,
model_kwargs,
),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
# Account for the edge-case where there are no finished sequences for a particular batch item. If so, return
# running sequences for that batch item.
none_finished = tf.math.reduce_any(is_sent_finished, axis=1)
sequences = tf.where(none_finished[:, None, None], sequences, running_sequences)
beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices)
# Apply the length penalty so that running scores match the finalized scores if they are used
running_scores = running_scores / (tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty)
scores = tf.where(none_finished[:, None], scores, running_scores)
# Take best beams for each batch (the score is sorted in descending order)
sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :])
scores = flatten_beam_dim(scores[:, :num_return_sequences])
beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :])
if not use_xla:
# Cut for backward compatibility
sequences = sequences[:, :cur_len]
beam_indices = beam_indices[:, : cur_len - decoder_prompt_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput
return output_cls(
sequences=sequences,
sequences_scores=scores,
scores=all_scores,
beam_indices=beam_indices,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput
return output_cls(
sequences=sequences,
sequences_scores=scores,
scores=all_scores,
beam_indices=beam_indices,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return sequences
def contrastive_search(
self,
input_ids: tf.Tensor,
top_k: Optional[int] = 1,
penalty_alpha: Optional[float] = 0,
logits_processor: Optional[TFLogitsProcessorList] = None,
logits_warper: Optional[TFLogitsProcessorList] = None,
max_length: Optional[int] = None,
pad_token_id: Optional[int] = None,
eos_token_id: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_scores: Optional[bool] = None,
return_dict_in_generate: Optional[bool] = None,
**model_kwargs,
) -> Union[TFContrastiveSearchOutput, tf.Tensor]:
r"""
Generates sequences of token ids for models with a language modeling head using **contrastive search** and can
be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
Parameters:
input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
The sequence used as a prompt for the generation.
            top_k (`int`, *optional*, defaults to 1):
                The size of the candidate set that is used to re-rank for contrastive search.
            penalty_alpha (`float`, *optional*, defaults to 0):
                The degeneration penalty for contrastive search; it is active when larger than 0.
logits_processor (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`]
used to modify the prediction scores of the language modeling head applied at each generation step.
logits_warper (`TFLogitsProcessorList`, *optional*):
An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`]
used to warp the prediction score distribution of the language modeling head applied before multinomial
sampling at each generation step.
max_length (`int`, *optional*, defaults to 20):
The maximum length of the sequence to be generated.
pad_token_id (`int`, *optional*):
The id of the *padding* token.
eos_token_id (`Union[int, list[int]]`, *optional*):
The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
output_attentions (`bool`, *optional*, defaults to `False`):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more details.
output_hidden_states (`bool`, *optional*, defaults to `False`):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more details.
output_scores (`bool`, *optional*, defaults to `False`):
Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
return_dict_in_generate (`bool`, *optional*, defaults to `False`):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
model_kwargs:
Additional model specific keyword arguments will be forwarded to the `call` function of the model. If
model is an encoder-decoder model the kwargs should include `encoder_outputs`.
Return:
[`~generation.TFContrastiveSearchDecoderOnlyOutput`],
[`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the
            generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if
`model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a
[`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`.
Examples:
```python
>>> from transformers import AutoTokenizer, TFAutoModelForCausalLM
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m")
>>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m")
>>> # set pad_token_id to eos_token_id because OPT does not have a PAD token
>>> model.config.pad_token_id = model.config.eos_token_id
>>> input_prompt = "DeepMind Company is"
>>> input_ids = tokenizer(input_prompt, return_tensors="tf")
>>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64)
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it']
```"""
def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0):
"""Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors."""
def gather_fn(tensor):
gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis)
return gathered_tensor
return tf.nest.map_structure(gather_fn, nested)
        # 1. init contrastive_search values
logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList()
logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList()
max_length = max_length if max_length is not None else self.generation_config.max_length
pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
if isinstance(eos_token_id, int):
eos_token_id = [eos_token_id]
output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
output_attentions = (
output_attentions if output_attentions is not None else self.generation_config.output_attentions
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
)
return_dict_in_generate = (
return_dict_in_generate
if return_dict_in_generate is not None
else self.generation_config.return_dict_in_generate
)
use_cache = True # In contrastive search, we always use cache
model_kwargs.pop("use_cache", None)
use_xla = not tf.executing_eagerly()
# TODO (Joao): fix cache format or find programmatic way to detect cache index
        # GPT2 and other models have a slightly different cache structure, with a different batch axis
model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self)
cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0
# 2. init `attentions`, `hidden_states`, and `scores` tuples
scores = [] if (return_dict_in_generate and output_scores) else None
decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None
cross_attentions = [] if (return_dict_in_generate and output_attentions) else None
decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None
# 3. init tensors to use for "xla-compileable" generate function
batch_size, cur_len = shape_list(input_ids)
# initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences`
input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0)
generated = tf.concat([input_ids, input_ids_padding], axis=-1)
finished_sequences = tf.zeros((batch_size,), dtype=tf.bool)
# 4. define "xla-compile-able" stop-condition and auto-regressive function
# define condition fn
def contrastive_search_cond_fn(
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
):
"""state termination condition fn."""
return ~tf.reduce_all(finished_sequences)
        # define body fn
def contrastive_search_body_fn(
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
):
"""state update fn."""
# if the first step in the loop, encode all the prefix and obtain: (1) past_key_values;
# (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step
if model_kwargs.get("past_key_values") is None:
# prepare inputs
model_inputs = self.prepare_inputs_for_generation(
generated[:, :cur_len], use_cache=use_cache, **model_kwargs
)
# encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save
# the `encoder_outputs`
outputs = self(
**model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
)
# last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with
# previous tokens)
if self.config.is_encoder_decoder:
last_hidden_states = outputs.decoder_hidden_states[-1]
else:
last_hidden_states = outputs.hidden_states[-1]
# XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
# iterations (with fixed shapes)
if use_xla:
last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]])
# next logit for contrastive search to select top-k candidate tokens
logit_for_next_step = outputs.logits[:, -1, :]
if use_xla:
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=outputs,
model_kwargs=model_kwargs,
cur_len=cur_len,
max_length=max_length,
batch_size=batch_size,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
# Expands model inputs top_k times, for batched forward passes (akin to beam search).
_, model_kwargs = self._expand_inputs_for_generation(
expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs
)
past_key_values = model_kwargs.get("past_key_values")
if past_key_values is None:
raise ValueError(
f"{self.__class__.__name__} does not support caching and therefore **can't** be used "
"for contrastive search."
)
elif (
not isinstance(past_key_values[0], (tuple, tf.Tensor))
or past_key_values[0][0].shape[0] != batch_size
):
raise ValueError(
f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be "
"used for contrastive search without further modifications."
)
else:
logit_for_next_step = next_step_cached_variables["logit_for_next_step"]
last_hidden_states = next_step_cached_variables["last_hidden_states"]
outputs = next_step_cached_variables["outputs"]
# contrastive_search main logic start:
# contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by
# degeneration penalty
logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len)
logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len)
next_probs = stable_softmax(logit_for_next_step, axis=-1)
top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k)
# Store scores, attentions and hidden_states when required
if not use_xla and return_dict_in_generate:
if output_scores:
scores.append(logit_for_next_step)
if output_attentions and self.config.is_encoder_decoder:
decoder_attentions.append(outputs.decoder_attentions)
elif output_attentions and not self.config.is_encoder_decoder:
decoder_attentions.append(outputs.attentions)
if self.config.is_encoder_decoder:
cross_attentions.append(outputs.cross_attentions)
if output_hidden_states and self.config.is_encoder_decoder:
decoder_hidden_states.append(outputs.decoder_hidden_states)
                elif output_hidden_states and not self.config.is_encoder_decoder:
decoder_hidden_states.append(outputs.hidden_states)
# Replicates the new past_key_values to match the `top_k` candidates
model_kwargs["past_key_values"] = tf.nest.map_structure(
lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"]
)
# compute the candidate tokens by the language model and collects their hidden_states
next_model_inputs = self.prepare_inputs_for_generation(
tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs
)
outputs = self(
**next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions
)
next_past_key_values = self._extract_past_from_model_output(outputs)
logits = outputs.logits[:, -1, :]
# name is different for encoder-decoder and decoder-only models
if self.config.is_encoder_decoder:
next_hidden = outputs.decoder_hidden_states[-1]
full_hidden_states = outputs.decoder_hidden_states
else:
next_hidden = outputs.hidden_states[-1]
full_hidden_states = outputs.hidden_states
context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0)
# compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the
# model confidence
selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k)
# converts indices to a dimension of top_k to the stacked top_k * batch_size dimension, for indexing
# without a need to reshape on tensors that have these two dimensions stacked
selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k
# prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing
# the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores
# (model confidence minus degeneration penalty); (6) decoder hidden_states
next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1)
next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked)
# XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across
# iterations (with fixed shapes)
if use_xla:
last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0])
else:
last_hidden_states = tf.concat([last_hidden_states, next_hidden], axis=1)
next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked)
next_past_key_values = gather_best_candidate(
next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis
)
logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked)
# Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration
if self.config.is_encoder_decoder:
next_step_cross_attentions = ()
next_step_decoder_attentions = ()
if output_attentions:
next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked)
next_step_decoder_attentions = gather_best_candidate(
outputs.decoder_attentions, selected_idx_stacked
)
outputs = TFSeq2SeqLMOutput(
past_key_values=next_past_key_values,
decoder_hidden_states=next_decoder_hidden_states,
decoder_attentions=next_step_decoder_attentions or None,
cross_attentions=next_step_cross_attentions or None,
)
else:
next_step_attentions = ()
if output_attentions:
next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked)
outputs = TFCausalLMOutputWithPast(
past_key_values=next_past_key_values,
hidden_states=next_decoder_hidden_states,
attentions=next_step_attentions or None,
)
# contrastive_search main logic end
if eos_token_id is not None:
if pad_token_id is None:
raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.")
unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32)
next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq)
next_token_is_eos = tf.math.reduce_any(
tf.equal(
tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1)
),
axis=0,
)
finished_sequences = finished_sequences | next_token_is_eos
# update `generated` and `cur_len`
update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1)
generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens)
cur_len += 1
if use_xla:
# NOTE: 1) relative to other generation strategies, contrastive search is always running forward
# passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from
# [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k`
model_kwargs = self._update_model_kwargs_for_xla_generation(
model_outputs=outputs,
model_kwargs=model_kwargs,
cur_len=cur_len + 1,
max_length=max_length,
batch_size=batch_size * top_k,
is_encoder_decoder=self.config.is_encoder_decoder,
batch_axis=cache_batch_axis,
)
else:
model_kwargs = self._update_model_kwargs_for_generation(
outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
)
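            # the forward pass above already produced the logits/hidden states needed for the *next* token
            # (contrastive search runs one step ahead), so cache them for the next loop iteration instead of
            # recomputing them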
next_step_cached_variables = {
"logit_for_next_step": logit_for_next_step,
"last_hidden_states": last_hidden_states,
"outputs": outputs,
}
return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables
# 5. run generation
# 1st generation step has to be run before to initialize `past_key_values`
generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn(
generated, finished_sequences, cur_len, model_kwargs, None
)
# 2-to-n generation steps can then be run in autoregressive fashion
# only in case 1st generation step does NOT yield EOS token though
maximum_iterations = max_length - cur_len
generated, _, cur_len, _, _ = tf.while_loop(
contrastive_search_cond_fn,
contrastive_search_body_fn,
(generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables),
maximum_iterations=maximum_iterations,
)
# 6. prepare outputs
if not use_xla:
# cut for backward compatibility
generated = generated[:, :cur_len]
if return_dict_in_generate:
if self.config.is_encoder_decoder:
# if model is an encoder-decoder, retrieve encoder attention weights
# and hidden states
encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None
encoder_hidden_states = (
model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None
)
scores = tuple(scores) if scores is not None else None
decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None
cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None
decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None
return TFContrastiveSearchEncoderDecoderOutput(
sequences=generated,
scores=scores,
encoder_attentions=encoder_attentions,
encoder_hidden_states=encoder_hidden_states,
decoder_attentions=decoder_attentions,
cross_attentions=cross_attentions,
decoder_hidden_states=decoder_hidden_states,
)
else:
return TFContrastiveSearchDecoderOnlyOutput(
sequences=generated,
scores=scores,
attentions=decoder_attentions,
hidden_states=decoder_hidden_states,
)
else:
return generated
def scatter_values_on_batch_indices(values, batch_indices):
shape = shape_list(batch_indices)
# broadcast batch dim to shape
broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1])
# transform batch_indices to pair_indices
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
# scatter values to pair indices
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape)
def sample_without_replacement(logits, num_samples):
"""
    Categorical sampling without replacement is currently not implemented in TF; the Gumbel-max trick will do for now.
    See https://github.com/tensorflow/tensorflow/issues/9260 for more info.
"""
z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1)))
_, indices = tf.nn.top_k(logits + z, num_samples)
return indices
def _ranking_fast(
context_hidden: tf.Tensor,
next_hidden: tf.Tensor,
next_top_k_probs: tf.Tensor,
alpha: float,
beam_width: int,
) -> tf.Tensor:
"""
Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described
in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each
row in the batch.
"""
norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True)
norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True)
cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1)
degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1)
next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1])
contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty
contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width])
selected_idx = tf.argmax(contrastive_score, axis=1)
return selected_idx
| transformers/src/transformers/generation/tf_utils.py/0 | {
"file_path": "transformers/src/transformers/generation/tf_utils.py",
"repo_id": "transformers",
"token_count": 76504
} | 433 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration with Deepspeed
"""
import copy
import importlib.metadata as importlib_metadata
import importlib.util
import weakref
from functools import partialmethod
from ..dependency_versions_check import dep_version_check
from ..utils import is_accelerate_available, is_torch_available, logging
if is_torch_available():
import torch
from torch import nn
logger = logging.get_logger(__name__)
def is_deepspeed_available():
package_exists = importlib.util.find_spec("deepspeed") is not None
# Check we're not importing a "deepspeed" directory somewhere but the actual library by trying to grab the version
# AND checking it has an author field in the metadata that is HuggingFace.
if package_exists:
try:
_ = importlib_metadata.metadata("deepspeed")
return True
except importlib_metadata.PackageNotFoundError:
            return False
    return False
if is_accelerate_available() and is_deepspeed_available():
from accelerate.utils.deepspeed import HfDeepSpeedConfig as DeepSpeedConfig
else:
# Inherits from a dummy `object` if accelerate is not available, so that python succeeds to import this file.
# Deepspeed glue code will never inherit this dummy object as it checks if accelerate is available.
from builtins import object as DeepSpeedConfig
class HfDeepSpeedConfig(DeepSpeedConfig):
"""
This object contains a DeepSpeed configuration dictionary and can be quickly queried for things like zero stage.
A `weakref` of this object is stored in the module's globals to be able to access the config from areas where
things like the Trainer object is not available (e.g. `from_pretrained` and `_get_resized_embeddings`). Therefore
it's important that this object remains alive while the program is still running.
[`Trainer`] uses the `HfTrainerDeepSpeedConfig` subclass instead. That subclass has logic to sync the configuration
with values of [`TrainingArguments`] by replacing special placeholder values: `"auto"`. Without this special logic
the DeepSpeed configuration is not modified in any way.
Args:
config_file_or_dict (`Union[str, Dict]`): path to DeepSpeed config file or dict.
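    Example (a minimal sketch; requires `accelerate` and `deepspeed` to be installed, and the config shown is
    illustrative):
    ```python
    from transformers.integrations.deepspeed import HfDeepSpeedConfig
    ds_config = {"zero_optimization": {"stage": 3}}  # usually loaded from a `ds_config.json` file
    hf_ds_config = HfDeepSpeedConfig(ds_config)
    hf_ds_config.is_zero3()  # True
    ```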
"""
def __init__(self, config_file_or_dict):
# set global weakref object
set_hf_deepspeed_config(self)
dep_version_check("accelerate")
dep_version_check("deepspeed")
super().__init__(config_file_or_dict)
class HfTrainerDeepSpeedConfig(HfDeepSpeedConfig):
"""
The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the
same lifespan as the latter.
"""
def __init__(self, config_file_or_dict):
super().__init__(config_file_or_dict)
self._dtype = None
self.mismatches = []
def dtype(self):
if self._dtype is None:
raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
return self._dtype
def is_auto(self, ds_key_long):
val = self.get_value(ds_key_long)
if val is None:
return False
else:
return val == "auto"
def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
"""
A utility method that massages the config file and can optionally verify that the values match.
1. Replace "auto" values with `TrainingArguments` value.
2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer
config values and if mismatched add the entry to `self.mismatched` - will assert during
`trainer_config_finalize` for one or more mismatches.
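        For example, `self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate")` replaces an
        `"auto"` learning rate in the DeepSpeed config with the `TrainingArguments` value, or records a mismatch
        when both are set explicitly but differ.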
"""
config, ds_key = self.find_config_node(ds_key_long)
if config is None:
return
if config.get(ds_key) == "auto":
config[ds_key] = hf_val
return
if not must_match:
return
ds_val = config.get(ds_key)
if ds_val is not None and ds_val != hf_val:
self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")
fill_only = partialmethod(fill_match, must_match=False)
def trainer_config_process(self, args, auto_find_batch_size=False):
"""
Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
creation.
"""
# DeepSpeed does:
# train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps
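        # e.g. world_size=8, per_device_train_batch_size=4, gradient_accumulation_steps=2 -> train_batch_size=64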
train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps
self.fill_match(
"train_micro_batch_size_per_gpu",
args.per_device_train_batch_size,
"per_device_train_batch_size",
not auto_find_batch_size,
)
self.fill_match(
"gradient_accumulation_steps",
args.gradient_accumulation_steps,
"gradient_accumulation_steps",
)
self.fill_match(
"train_batch_size",
train_batch_size,
"train_batch_size (calculated)",
not auto_find_batch_size,
)
self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm")
self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate")
self.fill_match(
"optimizer.params.betas",
[args.adam_beta1, args.adam_beta2],
"adam_beta1+adam_beta2",
)
self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon")
self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay")
self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg
self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate")
# total_num_steps - will get set in trainer_config_finalize
# fp16
if args.fp16 or args.fp16_full_eval:
fp16_backend = "apex" if args.fp16_backend == "apex" else "amp"
else:
fp16_backend = None
if args.save_on_each_node:
# deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True
self.config["checkpoint"] = self.config.get("checkpoint", {})
self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node
# amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set
# any here unless the user did the work
self.fill_match(
"fp16.enabled",
((args.fp16 or args.fp16_full_eval) and fp16_backend == "amp"),
"fp16|fp16_full_eval+fp16_backend(amp)",
)
# apex: delegates amp work to apex (which needs to be available), but it cannot be used with any
# ZeRO features
self.fill_match("amp.enabled", fp16_backend == "apex", "fp16+fp16_backend(apex)")
self.fill_match("amp.opt_level", args.fp16_opt_level, "fp16_opt_level")
self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval")
# deepspeed's default mode is fp16 unless there is a config that says differently
if self.is_true("bf16.enabled"):
self._dtype = torch.bfloat16
elif self.is_false("fp16.enabled"):
self._dtype = torch.float32
else:
self._dtype = torch.float16
def trainer_config_finalize(self, args, model, num_training_steps):
"""
This stage is run after we have the model and know num_training_steps.
Now we can complete the configuration process.
"""
# zero
# deal with config keys that use `auto` value and rely on model's hidden_size
hidden_size_based_keys = [
"zero_optimization.reduce_bucket_size",
"zero_optimization.stage3_prefetch_bucket_size",
"zero_optimization.stage3_param_persistence_threshold",
]
hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]
if len(hidden_size_auto_keys) > 0:
if hasattr(model.config, "hidden_size"):
hidden_size = model.config.hidden_size
elif hasattr(model.config, "hidden_sizes"):
# if there are many hidden sizes pick the largest one
hidden_size = max(model.config.hidden_sizes)
elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_size"):
hidden_size = model.config.text_config.hidden_size
elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_sizes"):
# if there are many hidden sizes pick the largest one
hidden_size = max(model.config.text_config.hidden_sizes)
else:
raise ValueError(
"The model's config file has neither `hidden_size` nor `hidden_sizes` entry, "
"therefore it's not possible to automatically fill out the following `auto` entries "
f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
"`auto` values for these keys with an integer value of your choice."
)
self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size)
if self.is_zero3():
# automatically assign the optimal config values based on model config
self.fill_only(
"zero_optimization.stage3_prefetch_bucket_size",
int(0.9 * hidden_size * hidden_size),
)
self.fill_only(
"zero_optimization.stage3_param_persistence_threshold",
10 * hidden_size,
)
# scheduler
self.fill_match(
"scheduler.params.total_num_steps",
num_training_steps,
"num_training_steps (calculated)",
)
self.fill_match(
"scheduler.params.warmup_num_steps",
args.get_warmup_steps(num_training_steps),
"warmup_steps",
)
if len(self.mismatches) > 0:
mismatches = "\n".join(self.mismatches)
raise ValueError(
"Please correct the following DeepSpeed config values that mismatch TrainingArguments"
f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
)
# keep the config object global to be able to access it anywhere during TrainingArguments life-cycle
_hf_deepspeed_config_weak_ref = None
def set_hf_deepspeed_config(hf_deepspeed_config_obj):
# this is a special weakref global object to allow us to get to Deepspeed config from APIs
# that don't have an easy way to get to the Deepspeed config outside of the Trainer domain.
global _hf_deepspeed_config_weak_ref
# will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed)
_hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj)
def unset_hf_deepspeed_config():
# useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method
global _hf_deepspeed_config_weak_ref
_hf_deepspeed_config_weak_ref = None
def is_deepspeed_zero3_enabled():
if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
return _hf_deepspeed_config_weak_ref().is_zero3()
else:
return False
def deepspeed_config():
if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
return _hf_deepspeed_config_weak_ref().config
else:
return None
def _load_state_dict_into_zero3_model(model_to_load, state_dict):
"""
Loads state dict into a model specifically for Zero3, since DeepSpeed does not support the `transformers`
tensor parallelism API.
Nearly identical code to PyTorch's `_load_from_state_dict`
"""
# copy state_dict so `_load_state_dict_into_zero3_model` can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
error_msgs = []
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, state_dict, prefix="", assign_to_params_buffers=False):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
local_metadata["assign_to_params_buffers"] = assign_to_params_buffers
args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
# Parameters of module and children will start with prefix. We can exit early if there are none in this
# state_dict
if is_deepspeed_zero3_enabled() and len([key for key in state_dict if key.startswith(prefix)]) > 0:
import deepspeed
# In sharded models, each shard has only part of the full state_dict, so only gather
# parameters that are in the current state_dict.
named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False))
params_to_gather = [named_parameters[k] for k in state_dict if k in named_parameters]
if len(params_to_gather) > 0:
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0):
if torch.distributed.get_rank() == 0:
module._load_from_state_dict(*args)
for name, child in module._modules.items():
if child is not None:
load(child, state_dict, prefix + name + ".", assign_to_params_buffers)
load(model_to_load, state_dict, assign_to_params_buffers=False)
return error_msgs
def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters):
"""
A convenience wrapper that deals with optimizer and lr scheduler configuration.
"""
from accelerate.utils import DummyOptim, DummyScheduler
config = hf_deepspeed_config.config
# Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's:
# 1. DS scheduler + DS optimizer: Yes
# 2. HF scheduler + HF optimizer: Mostly*
# 3. DS scheduler + HF optimizer: Mostly*
# 4. HF scheduler + DS optimizer: Yes
#
# Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB)
optimizer = None
if "optimizer" in config:
if args.adafactor:
raise ValueError(
"--adafactor was passed, but also found `optimizer` configured in the DeepSpeed config. "
"Only one optimizer can be configured."
)
optimizer = DummyOptim(params=model_parameters)
else:
if hf_deepspeed_config.is_offload():
logger.info(
"Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the"
" custom optimizer has both CPU and GPU implementation (except LAMB)"
)
# ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch.
# But trainer uses AdamW by default.
optimizer = trainer.create_optimizer()
# To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer`
config["zero_allow_untested_optimizer"] = True
lr_scheduler = None
if "scheduler" in config:
lr_scheduler = DummyScheduler(optimizer)
else:
if isinstance(optimizer, DummyOptim):
def _lr_scheduler_callable(optimizer):
# create a shallow copy first, so later modifications do not affect original trainer
trainer_copy = copy.copy(trainer)
# at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set
# update it to None so that we can re-create a new scheduler
trainer_copy.lr_scheduler = None
lr_scheduler = trainer_copy.create_scheduler(
num_training_steps=num_training_steps, optimizer=optimizer
)
return lr_scheduler
lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable)
else:
lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
return optimizer, lr_scheduler
def deepspeed_init(trainer, num_training_steps, inference=False):
"""
Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args.
If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made.
Args:
trainer: Trainer object
num_training_steps: per single gpu
        inference: launch in inference mode (no optimizer and no lr scheduler)
Returns: optimizer, lr_scheduler
We may use `deepspeed_init` more than once during the life of Trainer, when we do - it's a temp hack based on:
https://github.com/deepspeedai/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it
can't resume from a checkpoint after it did some stepping https://github.com/deepspeedai/DeepSpeed/issues/1612
"""
from deepspeed.utils import logger as ds_logger
model = trainer.model
args = trainer.args
hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config
# resume config update - some bits like `model` and `num_training_steps` only become available during train
hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
# set the Deepspeed log level consistent with the Trainer
ds_logger.setLevel(args.get_process_log_level())
if inference:
# only Z3 makes sense for the inference
if not hf_deepspeed_config.is_zero3():
raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")
# in case the training config is re-used for inference
hf_deepspeed_config.del_config_sub_tree("optimizer")
hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
optimizer, lr_scheduler = None, None
model_parameters = None
else:
trainer.optimizer = None # important for when deepspeed_init is used as re-init
deepspeed_tp_size = hf_deepspeed_config.config.get("tensor_parallel", {}).get("autotp_size", 1)
if deepspeed_tp_size > 1:
import deepspeed
model = deepspeed.tp_model_init(
model=model,
tp_size=deepspeed_tp_size,
dtype=hf_deepspeed_config.dtype(),
config=hf_deepspeed_config.config,
)
model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
optimizer, lr_scheduler = deepspeed_optim_sched(
trainer, hf_deepspeed_config, args, num_training_steps, model_parameters
)
# keep for quick debug:
# from pprint import pprint; pprint(config)
return optimizer, lr_scheduler
def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True):
# it's possible that the user is trying to resume from model_path, which doesn't necessarily
# contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
# a resume from a checkpoint and not just a local pretrained weight. So we check here if the
# path contains what looks like a deepspeed checkpoint
import glob
deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*"))
if len(deepspeed_checkpoint_dirs) > 0:
logger.info(f"Attempting to resume from {checkpoint_path}")
# this magically updates self.optimizer and self.lr_scheduler
load_path, _ = deepspeed_engine.load_checkpoint(
checkpoint_path,
load_module_strict=load_module_strict,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
if load_path is None:
raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}")
else:
raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")
| transformers/src/transformers/integrations/deepspeed.py/0 | {
"file_path": "transformers/src/transformers/integrations/deepspeed.py",
"repo_id": "transformers",
"token_count": 8663
} | 434 |
from tokenizers import Regex, Tokenizer, decoders, pre_tokenizers, processors
from tokenizers.models import BPE
from transformers import LlamaTokenizerFast
from transformers.convert_slow_tokenizer import bytes_to_unicode
class MistralConverter:
"""
A general tiktoken converter.
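    Example (a minimal sketch; `token2id` maps token bytes to ranks and `specials` is a list of special token
    strings, as extracted from a tekken/tiktoken tokenizer file):
    ```python
    converter = MistralConverter(vocab=token2id, additional_special_tokens=specials)
    backend_tokenizer = converter.converted()  # a `tokenizers.Tokenizer` instance
    ```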
"""
def __init__(
self,
vocab=None,
pattern=r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}{1,3}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+""",
add_prefix_space=False,
additional_special_tokens=None,
*args,
**kwargs,
):
super().__init__(*args)
self.vocab = vocab
self.pattern = pattern
self.add_prefix_space = add_prefix_space
self.additional_special_tokens = additional_special_tokens
    def extract_vocab_merges_from_model(self, vocab: dict):
bpe_ranks = vocab
byte_encoder = bytes_to_unicode()
def token_bytes_to_string(b):
return "".join([byte_encoder[ord(char)] for char in b.decode("latin-1")])
merges = []
vocab = {}
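        # Reconstruct the BPE merge list from the token ranks: every split of a token whose two halves are themselves
        # in the vocab is a candidate merge; candidates are ordered by the ranks of their pieces, then globally by the
        # rank of the merged token.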
for idx, (token, rank) in enumerate(bpe_ranks.items()):
if token not in self.additional_special_tokens:
vocab[token_bytes_to_string(token)] = idx
if len(token) == 1:
continue
local = []
for index in range(1, len(token)):
piece_l, piece_r = token[:index], token[index:]
if piece_l in bpe_ranks and piece_r in bpe_ranks and (piece_l + piece_r) in bpe_ranks:
local.append((piece_l, piece_r, rank))
local = sorted(local, key=lambda x: (bpe_ranks[x[0]], bpe_ranks[x[1]]), reverse=False)
merges.extend(local)
else:
vocab[token] = idx
merges = sorted(merges, key=lambda val: val[2], reverse=False)
merges = [(token_bytes_to_string(val[0]), token_bytes_to_string(val[1])) for val in merges]
return vocab, merges
def tokenizer(self):
vocab_scores, merges = self.extract_vocab_merges_from_model(self.vocab)
tokenizer = Tokenizer(BPE(vocab_scores, merges, fuse_unk=False))
if hasattr(tokenizer.model, "ignore_merges"):
tokenizer.model.ignore_merges = True
return tokenizer
def converted(self) -> Tokenizer:
tokenizer = self.tokenizer()
tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.Split(Regex(self.pattern), behavior="isolated", invert=False),
pre_tokenizers.ByteLevel(add_prefix_space=self.add_prefix_space, use_regex=False),
]
)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.add_special_tokens(self.additional_special_tokens)
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False)
return tokenizer
def convert_tekken_tokenizer(tokenizer_file: str):
"""Convert a "tekken" tokenizer to a fast Tokenizer."""
# Tekken format -- need to use the Converter
from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
# Load directly using their lib
mistral_tokenizer = MistralTokenizer.from_file(tokenizer_file)
# Extract vocab and special tokens
vocab = mistral_tokenizer.instruct_tokenizer.tokenizer._tekken_token2id_nospecial
all_special = [
token.value if hasattr(token, "value") else token
for token in mistral_tokenizer.instruct_tokenizer.tokenizer._all_special_tokens
]
specials_tokens = {token: all_special.index(token) for token in all_special}
specials_tokens.update(vocab)
vocab = specials_tokens
# Convert
tokenizer = LlamaTokenizerFast(
tokenizer_object=MistralConverter(vocab=vocab, additional_special_tokens=all_special).converted(),
)
# Post-process
tokenizer.add_special_tokens({"additional_special_tokens": all_special})
return tokenizer
| transformers/src/transformers/integrations/mistral.py/0 | {
"file_path": "transformers/src/transformers/integrations/mistral.py",
"repo_id": "transformers",
"token_count": 1835
} | 435 |
/*!
**************************************************************************************************
* Deformable DETR
* Copyright (c) 2020 SenseTime. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 [see LICENSE for details]
**************************************************************************************************
* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0
**************************************************************************************************
*/
#include <vector>
#include "cuda/ms_deform_im2col_cuda.cuh"
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda.h>
#include <cuda_runtime.h>
#pragma once
#include <torch/extension.h>
at::Tensor ms_deform_attn_cuda_forward(
const at::Tensor &value,
const at::Tensor &spatial_shapes,
const at::Tensor &level_start_index,
const at::Tensor &sampling_loc,
const at::Tensor &attn_weight,
const int im2col_step)
{
AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
const int batch = value.size(0);
const int spatial_size = value.size(1);
const int num_heads = value.size(2);
const int channels = value.size(3);
const int num_levels = spatial_shapes.size(0);
const int num_query = sampling_loc.size(1);
const int num_point = sampling_loc.size(4);
const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());
const int batch_n = im2col_step_;
auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
auto per_value_size = spatial_size * num_heads * channels;
auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
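    // Process the batch in chunks of `im2col_step_` samples; each chunk writes its results directly into the
    // corresponding slice of `output` (via `output_n.select(0, n)`), so no extra temporary buffer is needed.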
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto columns = output_n.select(0, n);
AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_forward_cuda", ([&] {
ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),
value.data<scalar_t>() + n * im2col_step_ * per_value_size,
spatial_shapes.data<int64_t>(),
level_start_index.data<int64_t>(),
sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
columns.data<scalar_t>());
}));
}
output = output.view({batch, num_query, num_heads*channels});
return output;
}
std::vector<at::Tensor> ms_deform_attn_cuda_backward(
const at::Tensor &value,
const at::Tensor &spatial_shapes,
const at::Tensor &level_start_index,
const at::Tensor &sampling_loc,
const at::Tensor &attn_weight,
const at::Tensor &grad_output,
const int im2col_step)
{
AT_ASSERTM(value.is_contiguous(), "value tensor has to be contiguous");
AT_ASSERTM(spatial_shapes.is_contiguous(), "spatial_shapes tensor has to be contiguous");
AT_ASSERTM(level_start_index.is_contiguous(), "level_start_index tensor has to be contiguous");
AT_ASSERTM(sampling_loc.is_contiguous(), "sampling_loc tensor has to be contiguous");
AT_ASSERTM(attn_weight.is_contiguous(), "attn_weight tensor has to be contiguous");
AT_ASSERTM(grad_output.is_contiguous(), "grad_output tensor has to be contiguous");
AT_ASSERTM(value.type().is_cuda(), "value must be a CUDA tensor");
AT_ASSERTM(spatial_shapes.type().is_cuda(), "spatial_shapes must be a CUDA tensor");
AT_ASSERTM(level_start_index.type().is_cuda(), "level_start_index must be a CUDA tensor");
AT_ASSERTM(sampling_loc.type().is_cuda(), "sampling_loc must be a CUDA tensor");
AT_ASSERTM(attn_weight.type().is_cuda(), "attn_weight must be a CUDA tensor");
AT_ASSERTM(grad_output.type().is_cuda(), "grad_output must be a CUDA tensor");
const int batch = value.size(0);
const int spatial_size = value.size(1);
const int num_heads = value.size(2);
const int channels = value.size(3);
const int num_levels = spatial_shapes.size(0);
const int num_query = sampling_loc.size(1);
const int num_point = sampling_loc.size(4);
const int im2col_step_ = std::min(batch, im2col_step);
    AT_ASSERTM(batch % im2col_step_ == 0, "batch(%d) must be divisible by im2col_step(%d)", batch, im2col_step_);
auto grad_value = at::zeros_like(value);
auto grad_sampling_loc = at::zeros_like(sampling_loc);
auto grad_attn_weight = at::zeros_like(attn_weight);
const int batch_n = im2col_step_;
auto per_value_size = spatial_size * num_heads * channels;
auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;
auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;
auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});
for (int n = 0; n < batch/im2col_step_; ++n)
{
auto grad_output_g = grad_output_n.select(0, n);
AT_DISPATCH_FLOATING_TYPES(value.type(), "ms_deform_attn_backward_cuda", ([&] {
ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),
grad_output_g.data<scalar_t>(),
value.data<scalar_t>() + n * im2col_step_ * per_value_size,
spatial_shapes.data<int64_t>(),
level_start_index.data<int64_t>(),
sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,
batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,
grad_value.data<scalar_t>() + n * im2col_step_ * per_value_size,
grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,
grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);
}));
}
return {
grad_value, grad_sampling_loc, grad_attn_weight
};
}
| transformers/src/transformers/kernels/deta/cuda/ms_deform_attn_cuda.cu/0 | {
"file_path": "transformers/src/transformers/kernels/deta/cuda/ms_deform_attn_cuda.cu",
"repo_id": "transformers",
"token_count": 3168
} | 436 |
#define min(a, b) ((a)<(b)?(a):(b))
#define max(a, b) ((a)>(b)?(a):(b))
#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0))
#define select(cond, a, b) ((cond)?(a):(b))
#define PI 3.141592
#define EPSILON 1e-8
#define MAX_VAL 1e12
#define MIN_VAL -1e12
#define EMPTY_VALUE -1
| transformers/src/transformers/kernels/yoso/common.h/0 | {
"file_path": "transformers/src/transformers/kernels/yoso/common.h",
"repo_id": "transformers",
"token_count": 140
} | 437 |
# Copyright 2025 The HuggingFace Inc. team.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import json
import os
import re
from contextlib import contextmanager, redirect_stdout
from io import StringIO
from typing import Optional
from .utils.import_utils import is_torch_available, requires
if is_torch_available():
import torch
from safetensors.torch import save_file
# Note to code inspectors: this toolbox is intended for people who add models to `transformers`.
if torch.distributed.is_available():
import torch.distributed.tensor
_torch_distributed_available = True
else:
_torch_distributed_available = False
from .utils import logging
logger = logging.get_logger(__name__)
def _is_rank_zero():
"""Return True if rank=0 or we aren't running distributed."""
if not (_torch_distributed_available and torch.distributed.is_initialized()):
return True
return torch.distributed.get_rank() == 0
MEMORY_ADDRESS_REGEX = re.compile(r"object at 0x[0-9A-Fa-f]+")
def _sanitize_repr_for_diff(x_str: str) -> str:
"""
Replace memory addresses in an object's repr with a stable placeholder
so that beautiful JSON diffs won't be ruined by ephemeral addresses.
"""
return MEMORY_ADDRESS_REGEX.sub("object at 0xXXXXXXXX", x_str)
def _dtensor_repr(x):
"""Return a stable string representation for a DTensor-like object."""
if _is_rank_zero():
return f"DTensor (rank0) -> {repr(x._local_tensor)}"
return "DTensor(non-rank0)"
def _serialize_tensor_like_io(
value, debug_path: Optional[str] = None, use_repr: bool = True, path_to_value: Optional[str] = None
):
"""
Converts Tensors and DTensors to a JSON-serializable dictionary representation.
Args:
value: Any Python object, often including torch Tensors, lists, dicts, etc.
debug_path (`str`, *optional*, defaults to `None`): Directory to dump debug JSON and SafeTensors files.
use_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensor as the
`value` property in the associated FULL_TENSORS.json file, or to store the full tensor in a separate
SafeTensors file and store the relative path to that file in the `value` property of the dictionary.
path_to_value (`str`, *optional*, defaults to `None`): The file name for the SafeTensors file holding the full
tensor value if `use_repr=False`.
Returns:
A nested Python structure (list, dict, or sanitized string) that is safe to json.dump.
"""
torch.set_printoptions(sci_mode=True)
if use_repr:
value_out = _repr_to_list(value)
elif path_to_value:
if not path_to_value.endswith(".safetensors"):
path_to_value += ".safetensors"
filepath = os.path.join(debug_path, path_to_value) if debug_path else path_to_value
save_file({"data": value.contiguous().detach().cpu()}, filepath)
value_out = f"./{path_to_value}"
else:
raise ValueError(f"{use_repr=} and {path_to_value=} cannot both be falsy.")
out = {
"shape": repr(value.shape),
"dtype": repr(value.dtype),
"value": value_out,
}
if value.dtype in {torch.float16, torch.float32, torch.bfloat16}:
out.update(
{
"mean": _sanitize_repr_for_diff(repr(value.mean())),
"std": _sanitize_repr_for_diff(repr(value.std())),
"min": _sanitize_repr_for_diff(repr(value.min())),
"max": _sanitize_repr_for_diff(repr(value.max())),
}
)
return out
def _serialize_io(value, debug_path: Optional[str] = None, use_repr: bool = True, path_to_value: Optional[str] = None):
"""
Recursively build a JSON-serializable Python structure from `value`.
Tensors and DTensors become either sanitized repr strings, or are saved to disk as SafeTensors files and their
relative paths are recorded in the returned Python structure.
Lists/tuples/dicts are recursed into.
All memory addresses are replaced with a stable placeholder.
Args:
value: Any Python object, often including torch Tensors, lists, dicts, etc.
debug_path (`str`, *optional*, defaults to `None`): Directory to dump debug JSON and SafeTensors files.
use_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensors as the
`value` property in the associated FULL_TENSORS.json file, or to store full tensors in separate SafeTensors
files and store the relative paths to those files in the `value` property.
path_to_value (`str`, *optional*, defaults to `None`): The file name for the SafeTensors file holding the full
tensor value if `use_repr=False`.
Returns:
A nested Python structure (list, dict, or sanitized string) that is safe to json.dump.
"""
if isinstance(value, (list, tuple)):
return [
_serialize_io(v, debug_path=debug_path, use_repr=use_repr, path_to_value=f"{path_to_value}_{i}")
for i, v in enumerate(value)
]
if isinstance(value, dict):
return {
k: _serialize_io(v, debug_path=debug_path, use_repr=use_repr, path_to_value=f"{path_to_value}_{k}")
for k, v in value.items()
}
if hasattr(value, "_local_tensor"):
return _serialize_tensor_like_io(
value._local_tensor, debug_path=debug_path, use_repr=use_repr, path_to_value=path_to_value
)
if isinstance(value, torch.Tensor):
return _serialize_tensor_like_io(value, debug_path=debug_path, use_repr=use_repr, path_to_value=path_to_value)
return _sanitize_repr_for_diff(repr(value))
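# Minimal usage sketch (assumed inputs, not from the original file): serializing a small
# structure yields nested JSON-safe data; tensor leaves become dicts with "shape",
# "dtype" and "value" (plus summary statistics for float dtypes), while plain Python
# values are stored as sanitized repr strings.
#
#   example = {"hidden": torch.zeros(2, 3), "step": 7}
#   serialized = _serialize_io(example, use_repr=True, path_to_value="example")
#   # serialized["hidden"]["shape"] == "torch.Size([2, 3])"
#   # serialized["step"] == "7"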
def _repr_to_list(value: torch.Tensor):
"""
Converts a tensor into a sanitized multi-line string representation.
Args:
value (`torch.Tensor`): The tensor to represent.
Returns:
`list[str]`: List of string lines representing the tensor.
"""
torch.set_printoptions(sci_mode=True, linewidth=120)
with StringIO() as buf, redirect_stdout(buf):
print(value) # printed to the redirected stdout to avoid line splits
raw = buf.getvalue()
return _sanitize_repr_for_diff(raw).splitlines()
def prune_outputs_if_children(node):
# if there are children, remove this node's "outputs"
# so we only see outputs at the leaf level
if node.get("children"):
node.pop("outputs", None)
for child in node["children"]:
prune_outputs_if_children(child)
LAYER_SUFFIX_RE = re.compile(r"(.*)\.(\d+)$") # should be generic enough, ends with a number
def is_layer_block(node):
"""
Checks whether a node represents a layer block with submodules.
Args:
node (`dict`): A node from the call tree.
Returns:
`bool`: Whether the node is a layer block.
"""
match = LAYER_SUFFIX_RE.match(node.get("module_path", ""))
if not match or not node.get("children"):
return False
number = match.group(2)
return any(f".{number}." in child.get("module_path", "") for child in node["children"])
def prune_intermediate_layers(node):
"""
Recursively removes intermediate layers from the tree to improve readability.
Keeps at least the first and last layers if many consecutive layers are present.
Args:
node (`dict`): The root or subnode to prune recursively.
"""
if not node.get("children"):
return
layer_blocks = [(i, child) for i, child in enumerate(node["children"]) if is_layer_block(child)]
if len(layer_blocks) > 2:
to_remove = [i for i, _ in layer_blocks[1:-1]]
node["children"] = [child for i, child in enumerate(node["children"]) if i not in to_remove]
for child in node["children"]:
prune_intermediate_layers(child)
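# Hedged example (hypothetical call tree, not from the original file): with four
# consecutive "encoder.layer.N" blocks, pruning keeps only the first and last layer.
#
#   tree = {"module_path": "Model", "children": [
#       {"module_path": f"Model.encoder.layer.{i}",
#        "children": [{"module_path": f"Model.encoder.layer.{i}.attn"}]}
#       for i in range(4)
#   ]}
#   prune_intermediate_layers(tree)
#   # [c["module_path"] for c in tree["children"]]
#   # -> ["Model.encoder.layer.0", "Model.encoder.layer.3"]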
def log_model_debug_trace(debug_path, model):
if debug_path:
try:
os.makedirs(debug_path, exist_ok=True)
base = os.path.join(debug_path, model._debugger_module_dump_name + "_debug_tree")
except Exception as e:
raise ValueError(f"Unexpected or existing debug_path={debug_path}.") from e
else:
base = model._debugger_module_dump_name + "_debug_tree"
logger.info(f"Writing model trace at {base}.json")
full_path = base + "_FULL_TENSORS.json"
summary_path = base + "_SUMMARY.json"
prune_outputs_if_children(model._call_tree)
with open(full_path, "w") as f:
json.dump(model._call_tree, f, indent=2)
# Summary-only version for readability - traverses the tree again. TODO: optimize?
def strip_values(node):
def clean(val):
if isinstance(val, dict):
val.pop("value", None)
for v in val.values():
clean(v)
elif isinstance(val, list):
for item in val:
clean(item)
clean(node.get("inputs", {}))
clean(node.get("outputs", {}))
for child in node.get("children", []):
strip_values(child)
tree_copy = json.loads(json.dumps(model._call_tree)) # deep copy
strip_values(tree_copy)
with open(summary_path, "w") as f:
json.dump(tree_copy, f, indent=2)
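# Sketch of the resulting artifacts (file names assume debug_path="debug" and a model
# class named MyModel, both hypothetical):
#   debug/MyModel_debug_tree_FULL_TENSORS.json  - full inputs/outputs per module
#   debug/MyModel_debug_tree_SUMMARY.json       - same tree with tensor values stripped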
def _attach_debugger_logic(
model,
debug_path: Optional[str] = ".",
do_prune_layers: Optional[bool] = True,
use_repr: bool = True,
):
"""
Attaches a debugging wrapper to every module in the model.
This records structured inputs and outputs during the forward pass into a call tree.
Args:
model (`PreTrainedModel`, `nn.Module`): Model to wrap.
debug_path (`str`): Optional directory to dump debug JSON files.
do_prune_layers (`bool`, *optional*, defaults to `True`): Whether to prune intermediate layers.
use_repr (bool, *optional*, defaults to `True`): Whether to save a `repr()`-ized version of the tensors as the
`value` property in the associated FULL_TENSORS.json file, or to store full tensors in separate SafeTensors
files and store the relative paths to those files in the `value` property.
"""
class_name = model.__class__.__name__
# Prepare data structures on the model object
model._call_tree = {"module_path": class_name, "inputs": None, "outputs": None, "children": []}
model._debugger_model_call_stack = []
model._debugger_module_dump_name = class_name # used for final JSON filename
if debug_path:
try:
os.makedirs(debug_path, exist_ok=True)
except Exception as e:
raise ValueError(f"Unexpected or existing debug_path={debug_path}.") from e
def wrap_forward(module, full_path):
orig_forward = module.forward
@functools.wraps(orig_forward)
def wrapped_forward(*inps, **kws):
if _is_rank_zero():
dict_inputs = {"args": inps, "kwargs": kws}
dict_inputs = {k: dict_inputs[k] for k in dict_inputs if len(dict_inputs[k]) > 0}
node = {
"module_path": full_path,
"inputs": _serialize_io(
dict_inputs,
debug_path=debug_path,
use_repr=use_repr,
path_to_value=f"{full_path}_inputs",
),
"outputs": None,
"children": [],
}
model._debugger_model_call_stack.append(node)
with torch.no_grad():
out = orig_forward(*inps, **kws)
if _is_rank_zero():
if sum(1 for _ in module.named_children()) > 0:
node["outputs"] = None
else:
node["outputs"] = _serialize_io(
out,
debug_path=debug_path,
use_repr=use_repr,
path_to_value=f"{full_path}_outputs",
)
finished = model._debugger_model_call_stack.pop()
# prune empty vertices here as well (mostly empty children nodes)
if not finished["children"]:
finished.pop("children")
if model._debugger_model_call_stack:
model._debugger_model_call_stack[-1]["children"].append(finished)
return out
module.forward = wrapped_forward
# wrap all submodules
for name, submodule in model.named_modules():
if name == "":
continue
wrap_forward(submodule, f"{class_name}.{name}")
# wrap top-level forward
real_top_forward = model.forward
@functools.wraps(real_top_forward)
def top_wrapped_forward(*inps, **kws):
if _is_rank_zero():
top_node = {
"module_path": f"{class_name} (top-level)",
"inputs": _serialize_io(
{"args": inps, "kwargs": kws},
debug_path=debug_path,
use_repr=use_repr,
path_to_value=f"{class_name}_inputs",
),
"outputs": None,
"children": [],
}
model._debugger_model_call_stack.append(top_node)
out = real_top_forward(*inps, **kws)
if _is_rank_zero() and model._debugger_model_call_stack:
top_node["outputs"] = _serialize_io(
out,
debug_path=debug_path,
use_repr=use_repr,
path_to_value=f"{class_name}_outputs",
)
finished = model._debugger_model_call_stack.pop()
model._call_tree["inputs"] = finished["inputs"]
model._call_tree["outputs"] = finished["outputs"]
model._call_tree["children"] = finished["children"]
# prune empty stuff for visibility
[model._call_tree.pop(k, None) for k in list(model._call_tree.keys()) if not model._call_tree[k]]
# prune layers that are not 0 or last
if do_prune_layers:
prune_intermediate_layers(model._call_tree)
# Write final JSON trace here
log_model_debug_trace(debug_path=debug_path, model=model)
return out
model.forward = top_wrapped_forward
@requires(backends=("torch",))
@contextmanager
def model_addition_debugger_context(
model,
debug_path: Optional[str] = None,
do_prune_layers: Optional[bool] = True,
use_repr: Optional[bool] = True,
):
"""
# Model addition debugger - context manager for model adders
This context manager is a power user tool intended for model adders.
It tracks all forward calls within a model forward pass and logs a slice of each input and output in a nested JSON file.
If `use_repr=True` (the default), the JSON file will record a `repr()`-ized version of the tensors as a list of
strings. If `use_repr=False`, the full tensors will be stored in separate SafeTensors files and the JSON file will
provide a relative path to that file.
Note that this context manager enforces `torch.no_grad()`.
## Usage
add the context manager to a model to debug
```python
import torch
from PIL import Image
from transformers import LlavaProcessor, LlavaForConditionalGeneration, model_addition_debugger_context
torch.random.manual_seed(673)
# load pretrained model and processor
model_id = "llava-hf/llava-1.5-7b-hf"
processor = LlavaProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(model_id)
# create random image input
random_image = Image.fromarray(torch.randint(0, 256, (224, 224, 3), dtype=torch.uint8).numpy())
# prompt
prompt = "<image>Describe this image."
# process inputs
inputs = processor(text=prompt, images=random_image, return_tensors="pt")
# call forward method (not .generate!)
with model_addition_debugger_context(model, debug_path="Your_debug_path", do_prune_layers=False):
output = model.forward(**inputs)
```
"""
orig_forwards = {m: m.forward for _, m in model.named_modules()}
orig_forwards[model] = model.forward
_attach_debugger_logic(model, debug_path, do_prune_layers, use_repr)
try:
yield model
finally:
for module_instance, forward_method in orig_forwards.items():
module_instance.forward = forward_method
| transformers/src/transformers/model_debugging_utils.py/0 | {
"file_path": "transformers/src/transformers/model_debugging_utils.py",
"repo_id": "transformers",
"token_count": 7166
} | 438 |
# coding=utf-8
# Copyright 2023 The Google Research Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ALIGN model."""
import math
from dataclasses import dataclass
from typing import Any, Callable, Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPooling,
BaseModelOutputWithPoolingAndNoAttention,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import ModelOutput, auto_docstring, can_return_tuple, logging
from .configuration_align import AlignConfig, AlignTextConfig, AlignVisionConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
"""
)
class AlignVisionModelOutput(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for text model's outputs that also contains a pooling of the last hidden states.
"""
)
class AlignTextModelOutput(ModelOutput):
r"""
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when the model is initialized with `with_projection=True`):
The text embeddings obtained by applying the projection layer to the pooler_output.
"""
text_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring
class AlignOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Contrastive loss for image-text similarity.
logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
similarity scores.
logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
similarity scores.
text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The text embeddings obtained by applying the projection layer to the pooled output of [`AlignTextModel`].
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
The output of [`AlignVisionModel`].
text_model_output (`BaseModelOutputWithPooling`):
The output of the [`AlignTextModel`].
vision_model_output (`BaseModelOutputWithPoolingAndNoAttention`):
The output of the [`AlignVisionModel`].
"""
loss: Optional[torch.FloatTensor] = None
logits_per_image: Optional[torch.FloatTensor] = None
logits_per_text: Optional[torch.FloatTensor] = None
text_embeds: Optional[torch.FloatTensor] = None
image_embeds: Optional[torch.FloatTensor] = None
text_model_output: BaseModelOutputWithPooling = None
vision_model_output: BaseModelOutputWithPoolingAndNoAttention = None
def to_tuple(self) -> tuple[Any]:
return tuple(
self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
for k in self.keys()
)
# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device), label_smoothing=0.1)
def align_loss(similarity: torch.Tensor) -> torch.Tensor:
caption_loss = contrastive_loss(similarity)
image_loss = contrastive_loss(similarity.t())
return (caption_loss + image_loss) / 2.0
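# Illustrative sketch (dummy data, not part of the original file): the similarity matrix
# is square for matched text/image pairs, its diagonal holds the positives, and the loss
# averages the text->image and image->text cross-entropy terms.
#
#   similarity = torch.randn(4, 4)  # 4 matched text/image pairs
#   loss = align_loss(similarity)   # scalar tensor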
# Copied from transformers.models.efficientnet.modeling_efficientnet.round_filters with EfficientNet->AlignVision
def round_filters(config: AlignVisionConfig, num_channels: int):
r"""
Round the number of filters based on the width multiplier.
"""
divisor = config.depth_divisor
num_channels *= config.width_coefficient
new_dim = max(divisor, int(num_channels + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_dim < 0.9 * num_channels:
new_dim += divisor
return int(new_dim)
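# Hedged worked example (width_coefficient=2.0 and depth_divisor=8 are hypothetical
# config values): 32 base channels scale to 64.0, which rounds to the divisor as
# max(8, int(64.0 + 4) // 8 * 8) = 64; since 64 >= 0.9 * 64.0, the result is 64.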
# Copied from transformers.models.efficientnet.modeling_efficientnet.correct_pad
def correct_pad(kernel_size: Union[int, tuple], adjust: bool = True):
r"""
Utility function to get the tuple padding value for the depthwise convolution.
Args:
kernel_size (`int` or `tuple`):
Kernel size of the convolution layers.
adjust (`bool`, *optional*, defaults to `True`):
Adjusts padding value to apply to right and bottom sides of the input.
"""
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
correct = (kernel_size[0] // 2, kernel_size[1] // 2)
if adjust:
return (correct[1] - 1, correct[1], correct[0] - 1, correct[0])
else:
return (correct[1], correct[1], correct[0], correct[0])
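# Worked example: for kernel_size=3 and adjust=True, correct == (1, 1) and the returned
# ZeroPad2d padding is (0, 1, 0, 1), i.e. one extra pixel on the right and bottom only.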
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetEmbeddings with EfficientNet->AlignVision
class AlignVisionEmbeddings(nn.Module):
r"""
A module that corresponds to the stem module of the original work.
"""
def __init__(self, config: AlignVisionConfig):
super().__init__()
self.out_dim = round_filters(config, 32)
self.padding = nn.ZeroPad2d(padding=(0, 1, 0, 1))
self.convolution = nn.Conv2d(
config.num_channels, self.out_dim, kernel_size=3, stride=2, padding="valid", bias=False
)
self.batchnorm = nn.BatchNorm2d(self.out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum)
self.activation = ACT2FN[config.hidden_act]
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
features = self.padding(pixel_values)
features = self.convolution(features)
features = self.batchnorm(features)
features = self.activation(features)
return features
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseConv2d with EfficientNet->AlignVision
class AlignVisionDepthwiseConv2d(nn.Conv2d):
def __init__(
self,
in_channels,
depth_multiplier=1,
kernel_size=3,
stride=1,
padding=0,
dilation=1,
bias=True,
padding_mode="zeros",
):
out_channels = in_channels * depth_multiplier
super().__init__(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=bias,
padding_mode=padding_mode,
)
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetExpansionLayer with EfficientNet->AlignVision
class AlignVisionExpansionLayer(nn.Module):
r"""
This corresponds to the expansion phase of each block in the original implementation.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int):
super().__init__()
self.expand_conv = nn.Conv2d(
in_channels=in_dim,
out_channels=out_dim,
kernel_size=1,
padding="same",
bias=False,
)
self.expand_bn = nn.BatchNorm2d(num_features=out_dim, eps=config.batch_norm_eps)
self.expand_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
# Expand phase
hidden_states = self.expand_conv(hidden_states)
hidden_states = self.expand_bn(hidden_states)
hidden_states = self.expand_act(hidden_states)
return hidden_states
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetDepthwiseLayer with EfficientNet->AlignVision
class AlignVisionDepthwiseLayer(nn.Module):
r"""
This corresponds to the depthwise convolution phase of each block in the original implementation.
"""
def __init__(
self,
config: AlignVisionConfig,
in_dim: int,
stride: int,
kernel_size: int,
adjust_padding: bool,
):
super().__init__()
self.stride = stride
conv_pad = "valid" if self.stride == 2 else "same"
padding = correct_pad(kernel_size, adjust=adjust_padding)
self.depthwise_conv_pad = nn.ZeroPad2d(padding=padding)
self.depthwise_conv = AlignVisionDepthwiseConv2d(
in_dim, kernel_size=kernel_size, stride=stride, padding=conv_pad, bias=False
)
self.depthwise_norm = nn.BatchNorm2d(
num_features=in_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
)
self.depthwise_act = ACT2FN[config.hidden_act]
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
# Depthwise convolution
if self.stride == 2:
hidden_states = self.depthwise_conv_pad(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
hidden_states = self.depthwise_norm(hidden_states)
hidden_states = self.depthwise_act(hidden_states)
return hidden_states
# Copied from transformers.models.efficientnet.modeling_efficientnet.EfficientNetSqueezeExciteLayer with EfficientNet->AlignVision
class AlignVisionSqueezeExciteLayer(nn.Module):
r"""
This corresponds to the Squeeze and Excitement phase of each block in the original implementation.
"""
def __init__(self, config: AlignVisionConfig, in_dim: int, expand_dim: int, expand: bool = False):
super().__init__()
self.dim = expand_dim if expand else in_dim
self.dim_se = max(1, int(in_dim * config.squeeze_expansion_ratio))
self.squeeze = nn.AdaptiveAvgPool2d(output_size=1)
self.reduce = nn.Conv2d(
in_channels=self.dim,
out_channels=self.dim_se,
kernel_size=1,
padding="same",
)
self.expand = nn.Conv2d(
in_channels=self.dim_se,
out_channels=self.dim,
kernel_size=1,
padding="same",
)
self.act_reduce = ACT2FN[config.hidden_act]
self.act_expand = nn.Sigmoid()
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
inputs = hidden_states
hidden_states = self.squeeze(hidden_states)
hidden_states = self.reduce(hidden_states)
hidden_states = self.act_reduce(hidden_states)
hidden_states = self.expand(hidden_states)
hidden_states = self.act_expand(hidden_states)
hidden_states = torch.mul(inputs, hidden_states)
return hidden_states
class AlignVisionFinalBlockLayer(nn.Module):
r"""
This corresponds to the final phase of each block in the original implementation.
"""
def __init__(
self, config: AlignVisionConfig, in_dim: int, out_dim: int, stride: int, drop_rate: float, id_skip: bool
):
super().__init__()
self.apply_dropout = stride == 1 and not id_skip
self.project_conv = nn.Conv2d(
in_channels=in_dim,
out_channels=out_dim,
kernel_size=1,
padding="same",
bias=False,
)
self.project_bn = nn.BatchNorm2d(
num_features=out_dim, eps=config.batch_norm_eps, momentum=config.batch_norm_momentum
)
self.dropout = nn.Dropout(p=drop_rate)
def forward(self, embeddings: torch.FloatTensor, hidden_states: torch.FloatTensor) -> torch.Tensor:
hidden_states = self.project_conv(hidden_states)
hidden_states = self.project_bn(hidden_states)
if self.apply_dropout:
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + embeddings
return hidden_states
class AlignVisionBlock(nn.Module):
r"""
This corresponds to the block module of the original EfficientNet vision encoder implementation.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
in_dim (`int`):
Number of input channels.
out_dim (`int`):
Number of output channels.
stride (`int`):
Stride size to be used in convolution layers.
expand_ratio (`int`):
Expand ratio to set the output dimensions for the expansion and squeeze-excite layers.
kernel_size (`int`):
Kernel size for the depthwise convolution layer.
drop_rate (`float`):
Dropout rate to be used in the final phase of each block.
id_skip (`bool`):
Whether to apply dropout and sum the final hidden states with the input embeddings during the final phase
of each block. Set to `True` for the first block of each stage.
adjust_padding (`bool`):
Whether to apply padding to only right and bottom side of the input kernel before the depthwise convolution
operation, set to `True` for inputs with odd input sizes.
"""
def __init__(
self,
config: AlignVisionConfig,
in_dim: int,
out_dim: int,
stride: int,
expand_ratio: int,
kernel_size: int,
drop_rate: float,
id_skip: bool,
adjust_padding: bool,
):
super().__init__()
self.expand_ratio = expand_ratio
self.expand = self.expand_ratio != 1
expand_in_dim = in_dim * expand_ratio
if self.expand:
self.expansion = AlignVisionExpansionLayer(
config=config, in_dim=in_dim, out_dim=expand_in_dim, stride=stride
)
self.depthwise_conv = AlignVisionDepthwiseLayer(
config=config,
in_dim=expand_in_dim if self.expand else in_dim,
stride=stride,
kernel_size=kernel_size,
adjust_padding=adjust_padding,
)
self.squeeze_excite = AlignVisionSqueezeExciteLayer(
config=config, in_dim=in_dim, expand_dim=expand_in_dim, expand=self.expand
)
self.projection = AlignVisionFinalBlockLayer(
config=config,
in_dim=expand_in_dim if self.expand else in_dim,
out_dim=out_dim,
stride=stride,
drop_rate=drop_rate,
id_skip=id_skip,
)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
embeddings = hidden_states
# Expansion and depthwise convolution phase
if self.expand_ratio != 1:
hidden_states = self.expansion(hidden_states)
hidden_states = self.depthwise_conv(hidden_states)
# Squeeze and excite phase
hidden_states = self.squeeze_excite(hidden_states)
hidden_states = self.projection(embeddings, hidden_states)
return hidden_states
class AlignVisionEncoder(nn.Module):
r"""
Forward propagates the embeddings through each vision encoder (EfficientNet) block.
Args:
config ([`AlignVisionConfig`]):
Model configuration class.
"""
def __init__(self, config: AlignVisionConfig):
super().__init__()
self.depth_coefficient = config.depth_coefficient
def round_repeats(repeats):
# Round number of block repeats based on depth multiplier.
return int(math.ceil(self.depth_coefficient * repeats))
num_base_blocks = len(config.in_channels)
num_blocks = sum(round_repeats(n) for n in config.num_block_repeats)
curr_block_num = 0
blocks = []
for i in range(num_base_blocks):
in_dim = round_filters(config, config.in_channels[i])
out_dim = round_filters(config, config.out_channels[i])
stride = config.strides[i]
kernel_size = config.kernel_sizes[i]
expand_ratio = config.expand_ratios[i]
for j in range(round_repeats(config.num_block_repeats[i])):
id_skip = j == 0
stride = 1 if j > 0 else stride
in_dim = out_dim if j > 0 else in_dim
adjust_padding = curr_block_num not in config.depthwise_padding
drop_rate = config.drop_connect_rate * curr_block_num / num_blocks
block = AlignVisionBlock(
config=config,
in_dim=in_dim,
out_dim=out_dim,
stride=stride,
kernel_size=kernel_size,
expand_ratio=expand_ratio,
drop_rate=drop_rate,
id_skip=id_skip,
adjust_padding=adjust_padding,
)
blocks.append(block)
curr_block_num += 1
self.blocks = nn.ModuleList(blocks)
def forward(
self,
hidden_states: torch.FloatTensor,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> BaseModelOutputWithPoolingAndNoAttention:
all_hidden_states = (hidden_states,) if output_hidden_states else None
for block in self.blocks:
hidden_states = block(hidden_states)
if output_hidden_states:
all_hidden_states += (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
)
class AlignTextEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
) -> torch.Tensor:
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
# Setting the token_type_ids to the registered buffer in the constructor, where it is all zeros. This usually occurs
# when token_type_ids are auto-generated; the registered buffer helps users trace the model without passing token_type_ids
# (solves issue #5664)
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
head_mask: Optional[torch.Tensor] = None,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
if head_mask is not None:
attn_weights = attn_weights * head_mask.view(1, -1, 1, 1)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
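# Minimal shape sketch (dummy tensors; the module argument only supplies `.training`,
# so a bare nn.Identity() is used here purely for illustration):
#
#   q = k = v = torch.randn(2, 4, 16, 8)  # (batch, num_heads, seq_len, head_dim)
#   out, weights = eager_attention_forward(nn.Identity(), q, k, v, None, scaling=8 ** -0.5)
#   # out.shape == (2, 16, 4, 8)      -- transposed back to (batch, seq_len, heads, head_dim)
#   # weights.shape == (2, 4, 16, 16) -- softmax-normalized attention probabilities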
class AlignTextSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.config = config
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.attention_dropout = config.attention_probs_dropout_prob
self.scaling = self.attention_head_size**-0.5
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_states = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
head_mask=head_mask,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
return outputs
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->AlignText
class AlignTextSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class AlignTextAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = AlignTextSelfAttention(config)
self.output = AlignTextSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->AlignText
class AlignTextIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->AlignText
class AlignTextOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class AlignTextLayer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = AlignTextAttention(config)
self.intermediate = AlignTextIntermediate(config)
self.output = AlignTextOutput(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
**kwargs,
) -> tuple[torch.Tensor]:
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
**kwargs,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class AlignTextEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([AlignTextLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
**kwargs,
) -> Union[tuple[torch.Tensor], BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
head_mask=layer_head_mask,
output_attentions=output_attentions,
**kwargs,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert -> AlignText
class AlignTextPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
@auto_docstring
class AlignPreTrainedModel(PreTrainedModel):
config: AlignConfig
base_model_prefix = "align"
supports_gradient_checkpointing = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, (nn.Linear, nn.Conv2d)):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, AlignModel):
nn.init.xavier_uniform_(module.text_projection.weight)
module.text_projection.bias.data.zero_()
module.temperature.data.fill_(self.config.temperature_init_value)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, (nn.LayerNorm, nn.BatchNorm2d)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@auto_docstring(
custom_intro="""
The text model from ALIGN without any head or projection on top.
"""
)
class AlignTextModel(AlignPreTrainedModel):
config: AlignTextConfig
_no_split_modules = ["AlignTextEmbeddings"]
def __init__(self, config: AlignTextConfig, add_pooling_layer: bool = True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = AlignTextEmbeddings(config)
self.encoder = AlignTextEncoder(config)
self.pooler = AlignTextPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
Examples:
```python
>>> from transformers import AutoTokenizer, AlignTextModel
>>> model = AlignTextModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled (EOS token) states
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
**kwargs,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The vision model from ALIGN without any head or projection on top.
"""
)
class AlignVisionModel(AlignPreTrainedModel):
config: AlignVisionConfig
main_input_name = "pixel_values"
supports_gradient_checkpointing = False
def __init__(self, config: AlignVisionConfig):
super().__init__(config)
self.config = config
self.embeddings = AlignVisionEmbeddings(config)
self.encoder = AlignVisionEncoder(config)
# Final pooling layer
if config.pooling_type == "mean":
self.pooler = nn.AvgPool2d(config.hidden_dim, ceil_mode=True)
elif config.pooling_type == "max":
self.pooler = nn.MaxPool2d(config.hidden_dim, ceil_mode=True)
else:
raise ValueError(f"config.pooling must be one of ['mean', 'max'] got {config.pooling}")
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> nn.Module:
return self.vision_model.embeddings.convolution
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
r"""
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AlignVisionModel
>>> model = AlignVisionModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
>>> pooled_output = outputs.pooler_output # pooled CLS states
```"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=True,
)
# Apply pooling
last_hidden_state = encoder_outputs[0]
pooled_output = self.pooler(last_hidden_state)
# Reshape (batch_size, projection_dim, 1 , 1) -> (batch_size, projection_dim)
pooled_output = pooled_output.reshape(pooled_output.shape[:2])
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring
class AlignModel(AlignPreTrainedModel):
config: AlignConfig
def __init__(self, config: AlignConfig):
super().__init__(config)
if not isinstance(config.text_config, AlignTextConfig):
raise TypeError(
"config.text_config is expected to be of type AlignTextConfig but is of type"
f" {type(config.text_config)}."
)
if not isinstance(config.vision_config, AlignVisionConfig):
raise TypeError(
"config.vision_config is expected to be of type AlignVisionConfig but is of type"
f" {type(config.vision_config)}."
)
text_config = config.text_config
vision_config = config.vision_config
self.projection_dim = config.projection_dim
self.text_embed_dim = text_config.hidden_size
self.text_model = AlignTextModel(text_config)
self.vision_model = AlignVisionModel(vision_config)
self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim)
self.temperature = nn.Parameter(torch.tensor(self.config.temperature_init_value))
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def get_text_features(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
applying the projection layer to the pooled output of [`AlignTextModel`].
Examples:
```python
>>> from transformers import AutoTokenizer, AlignModel
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> tokenizer = AutoTokenizer.from_pretrained("kakaobrain/align-base")
>>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
>>> text_features = model.get_text_features(**inputs)
```"""
# Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = text_outputs[0][:, 0, :]
text_features = self.text_projection(last_hidden_state)
return text_features
@auto_docstring
def get_image_features(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> torch.FloatTensor:
r"""
Returns:
image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
applying the projection layer to the pooled output of [`AlignVisionModel`].
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AlignModel
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, return_tensors="pt")
>>> image_features = model.get_image_features(**inputs)
```"""
# Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
image_features = vision_outputs[1] # pooled_output
return image_features
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
return_loss: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, AlignOutput]:
r"""
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, AlignModel
>>> model = AlignModel.from_pretrained("kakaobrain/align-base")
>>> processor = AutoProcessor.from_pretrained("kakaobrain/align-base")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(
... images=image, text=["a photo of a cat", "a photo of a dog"], return_tensors="pt", padding=True
... )
>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
```"""
# Use ALIGN model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_outputs = self.vision_model(
pixel_values=pixel_values,
output_hidden_states=output_hidden_states,
return_dict=True,
)
text_outputs = self.text_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
image_embeds = vision_outputs[1]
text_embeds = text_outputs[0][:, 0, :]
text_embeds = self.text_projection(text_embeds)
# normalized features
image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
# cosine similarity as logits
logits_per_text = torch.matmul(text_embeds, image_embeds.t()) / self.temperature
logits_per_image = logits_per_text.t()
loss = None
if return_loss:
loss = align_loss(logits_per_text)
return AlignOutput(
loss=loss,
logits_per_image=logits_per_image,
logits_per_text=logits_per_text,
text_embeds=text_embeds,
image_embeds=image_embeds,
text_model_output=text_outputs,
vision_model_output=vision_outputs,
)
__all__ = ["AlignPreTrainedModel", "AlignTextModel", "AlignVisionModel", "AlignModel"]
| transformers/src/transformers/models/align/modeling_align.py/0 | {
"file_path": "transformers/src/transformers/models/align/modeling_align.py",
"repo_id": "transformers",
"token_count": 22035
} | 439 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/aria/modular_aria.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_aria.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2024 The Rhymes-AI Teams Authors and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils import PreTokenizedInput, TextInput
from ...utils import TensorType
from ..auto import AutoTokenizer
class AriaProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"max_image_size": 980,
"split_image": False,
},
"return_tensors": TensorType.PYTORCH,
}
class AriaProcessor(ProcessorMixin):
"""
    AriaProcessor is a processor for the Aria model which wraps the Aria image preprocessor and the Llama slow tokenizer.
Args:
image_processor (`AriaImageProcessor`, *optional*):
The AriaImageProcessor to use for image preprocessing.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
chat_template (`str`, *optional*):
A Jinja template which will be used to convert lists of messages in a chat into a tokenizable string.
size_conversion (`Dict`, *optional*):
A dictionary indicating size conversions for images.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "AriaImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
image_processor=None,
tokenizer: Union[AutoTokenizer, str] = None,
chat_template: Optional[str] = None,
size_conversion: Optional[dict[Union[float, int], int]] = None,
):
if size_conversion is None:
size_conversion = {490: 128, 980: 256}
self.size_conversion = {int(k): v for k, v in size_conversion.items()}
self.image_token = tokenizer.image_token
self.image_token_id = tokenizer.image_token_id
if tokenizer is not None and tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.unk_token
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]],
images: Optional[ImageInput] = None,
audio=None,
videos=None,
**kwargs: Unpack[AriaProcessorKwargs],
) -> BatchFeature:
"""
        Main method to prepare one or several sequence(s) and image(s) for the model.
Args:
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_mask** -- Pixel mask to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
AriaProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if isinstance(text, str):
text = [text]
        elif not isinstance(text, list) or not isinstance(text[0], str):
            raise TypeError("Invalid input text. Please provide a string, or a list of strings")
if images is not None:
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
# expand the image_token according to the num_crops and tokens per image
tokens_per_image = self.size_conversion[image_inputs.pixel_values.shape[2]]
prompt_strings = []
num_crops = image_inputs.pop("num_crops") * tokens_per_image
for sample in text:
sample = sample.replace(self.tokenizer.image_token, self.tokenizer.image_token * num_crops)
prompt_strings.append(sample)
else:
image_inputs = {}
prompt_strings = text
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = AriaProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
max_size = images_kwargs.get("max_image_size", None) or self.image_processor.max_image_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [self.size_conversion[max_size] * num_patches for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
# Remove `num_crops`, it is popped and used only when processing. Make a copy of list when removing
# otherwise `self.image_processor.model_input_names` is also modified
image_processor_input_names = [name for name in image_processor_input_names if name != "num_crops"]
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
__all__ = ["AriaProcessor"]
| transformers/src/transformers/models/aria/processing_aria.py/0 | {
"file_path": "transformers/src/transformers/models/aria/processing_aria.py",
"repo_id": "transformers",
"token_count": 3849
} | 440 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""AutoVideoProcessor class."""
import importlib
import json
import os
import warnings
from collections import OrderedDict
from typing import TYPE_CHECKING, Optional, Union
# Build the list of all video processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...utils import CONFIG_NAME, VIDEO_PROCESSOR_NAME, cached_file, is_torchvision_available, logging
from ...utils.import_utils import requires
from ...video_processing_utils import BaseVideoProcessor
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
if TYPE_CHECKING:
# This significantly improves completion suggestion performance when
# the transformers package is used with Microsoft's Pylance language server.
VIDEO_PROCESSOR_MAPPING_NAMES: OrderedDict[str, tuple[Optional[str], Optional[str]]] = OrderedDict()
else:
VIDEO_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("glm4v", "Glm4vVideoProcessor"),
("instructblip", "InstructBlipVideoVideoProcessor"),
("instructblipvideo", "InstructBlipVideoVideoProcessor"),
("internvl", "InternVLVideoProcessor"),
("llava_next_video", "LlavaNextVideoVideoProcessor"),
("llava_onevision", "LlavaOnevisionVideoProcessor"),
("qwen2_5_omni", "Qwen2VLVideoProcessor"),
("qwen2_5_vl", "Qwen2VLVideoProcessor"),
("qwen2_vl", "Qwen2VLVideoProcessor"),
("sam2_video", "Sam2VideoVideoProcessor"),
("smolvlm", "SmolVLMVideoProcessor"),
("video_llava", "VideoLlavaVideoProcessor"),
("vjepa2", "VJEPA2VideoProcessor"),
]
)
for model_type, video_processors in VIDEO_PROCESSOR_MAPPING_NAMES.items():
fast_video_processor_class = video_processors
    # If torchvision is not available, we set the fast video processor class to None
if not is_torchvision_available():
fast_video_processor_class = None
VIDEO_PROCESSOR_MAPPING_NAMES[model_type] = fast_video_processor_class
VIDEO_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, VIDEO_PROCESSOR_MAPPING_NAMES)
def video_processor_class_from_name(class_name: str):
for module_name, extractors in VIDEO_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
module_name = model_type_to_module_name(module_name)
module = importlib.import_module(f".{module_name}", "transformers.models")
try:
return getattr(module, class_name)
except AttributeError:
continue
for extractor in VIDEO_PROCESSOR_MAPPING._extra_content.values():
if getattr(extractor, "__name__", None) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
main_module = importlib.import_module("transformers")
if hasattr(main_module, class_name):
return getattr(main_module, class_name)
return None
def get_video_processor_config(
pretrained_model_name_or_path: Union[str, os.PathLike],
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
resume_download: Optional[bool] = None,
proxies: Optional[dict[str, str]] = None,
token: Optional[Union[bool, str]] = None,
revision: Optional[str] = None,
local_files_only: bool = False,
**kwargs,
):
"""
Loads the video processor configuration from a pretrained model video processor configuration.
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained model configuration hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a configuration file saved using the
[`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the configuration files and override the cached versions if they
exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
local_files_only (`bool`, *optional*, defaults to `False`):
If `True`, will only try to load the video processor configuration from local files.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Returns:
`Dict`: The configuration of the video processor.
Examples:
```python
# Download configuration from huggingface.co and cache.
video_processor_config = get_video_processor_config("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
# This model does not have a video processor config so the result will be an empty dict.
video_processor_config = get_video_processor_config("FacebookAI/xlm-roberta-base")
# Save a pretrained video processor locally and you can reload its config
from transformers import AutoVideoProcessor
video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
video_processor.save_pretrained("video-processor-test")
video_processor = get_video_processor_config("video-processor-test")
```"""
use_auth_token = kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
resolved_config_file = cached_file(
pretrained_model_name_or_path,
VIDEO_PROCESSOR_NAME,
cache_dir=cache_dir,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
token=token,
revision=revision,
local_files_only=local_files_only,
)
if resolved_config_file is None:
logger.info(
"Could not locate the video processor configuration file, will try to use the model config instead."
)
return {}
with open(resolved_config_file, encoding="utf-8") as reader:
return json.load(reader)
@requires(backends=("vision", "torchvision"))
class AutoVideoProcessor:
r"""
This is a generic video processor class that will be instantiated as one of the video processor classes of the
library when created with the [`AutoVideoProcessor.from_pretrained`] class method.
This class cannot be instantiated directly using `__init__()` (throws an error).
"""
def __init__(self):
raise OSError(
"AutoVideoProcessor is designed to be instantiated "
"using the `AutoVideoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
)
@classmethod
@replace_list_option_in_docstrings(VIDEO_PROCESSOR_MAPPING_NAMES)
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
r"""
Instantiate one of the video processor classes of the library from a pretrained model vocabulary.
The video processor class to instantiate is selected based on the `model_type` property of the config object
(either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
List options
Params:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained video_processor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a video processor file saved using the
[`~video_processing_utils.BaseVideoProcessor.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved video processor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model video processor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the video processor files and override the cached versions if
they exist.
resume_download:
Deprecated and ignored. All downloads are now resumed by default when possible.
Will be removed in v5 of Transformers.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or *bool*, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final video processor object. If `True`, then this
functions returns a `Tuple(video_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not video processor attributes: i.e., the part of
`kwargs` which has not been used to update `video_processor` and is otherwise ignored.
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
should only be set to `True` for repositories you trust and in which you have read the code, as it will
execute code present on the Hub on your local machine.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are video processor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* video processor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
<Tip>
Passing `token=True` is required when you want to use a private model.
</Tip>
Examples:
```python
>>> from transformers import AutoVideoProcessor
>>> # Download video processor from huggingface.co and cache.
>>> video_processor = AutoVideoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
>>> # If video processor files are in a directory (e.g. video processor was saved using *save_pretrained('./test/saved_model/')*)
>>> # video_processor = AutoVideoProcessor.from_pretrained("./test/saved_model/")
```"""
use_auth_token = kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if kwargs.get("token") is not None:
raise ValueError(
"`token` and `use_auth_token` are both specified. Please set only the argument `token`."
)
kwargs["token"] = use_auth_token
config = kwargs.pop("config", None)
trust_remote_code = kwargs.pop("trust_remote_code", None)
kwargs["_from_auto"] = True
config_dict, _ = BaseVideoProcessor.get_video_processor_dict(pretrained_model_name_or_path, **kwargs)
video_processor_class = config_dict.get("video_processor_type", None)
video_processor_auto_map = None
if "AutoVideoProcessor" in config_dict.get("auto_map", {}):
video_processor_auto_map = config_dict["auto_map"]["AutoVideoProcessor"]
# If we still don't have the video processor class, check if we're loading from a previous image processor config
# and if so, infer the video processor class from there.
if video_processor_class is None and video_processor_auto_map is None:
image_processor_class = config_dict.pop("image_processor_type", None)
if image_processor_class is not None:
video_processor_class_inferred = image_processor_class.replace("ImageProcessor", "VideoProcessor")
# Some models have different image processors, e.g. InternVL uses GotOCRImageProcessor
# We cannot use GotOCRVideoProcessor when falling back for BC and should try to infer from config later on
if video_processor_class_inferred in VIDEO_PROCESSOR_MAPPING_NAMES.values():
video_processor_class = video_processor_class_inferred
if "AutoImageProcessor" in config_dict.get("auto_map", {}):
image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
video_processor_auto_map = image_processor_auto_map.replace("ImageProcessor", "VideoProcessor")
# If we don't find the video processor class in the video processor config, let's try the model config.
if video_processor_class is None and video_processor_auto_map is None:
if not isinstance(config, PretrainedConfig):
config = AutoConfig.from_pretrained(
pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
)
            # It could be in `config.video_processor_type`
video_processor_class = getattr(config, "video_processor_type", None)
if hasattr(config, "auto_map") and "AutoVideoProcessor" in config.auto_map:
video_processor_auto_map = config.auto_map["AutoVideoProcessor"]
if video_processor_class is not None:
video_processor_class = video_processor_class_from_name(video_processor_class)
has_remote_code = video_processor_auto_map is not None
has_local_code = video_processor_class is not None or type(config) in VIDEO_PROCESSOR_MAPPING
trust_remote_code = resolve_trust_remote_code(
trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
)
if has_remote_code and trust_remote_code:
class_ref = video_processor_auto_map
video_processor_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
_ = kwargs.pop("code_revision", None)
video_processor_class.register_for_auto_class()
return video_processor_class.from_dict(config_dict, **kwargs)
elif video_processor_class is not None:
return video_processor_class.from_dict(config_dict, **kwargs)
# Last try: we use the VIDEO_PROCESSOR_MAPPING.
elif type(config) in VIDEO_PROCESSOR_MAPPING:
video_processor_class = VIDEO_PROCESSOR_MAPPING[type(config)]
if video_processor_class is not None:
return video_processor_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
else:
raise ValueError(
"This video processor cannot be instantiated. Please make sure you have `torchvision` installed."
)
raise ValueError(
f"Unrecognized video processor in {pretrained_model_name_or_path}. Should have a "
f"`video_processor_type` key in its {VIDEO_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in VIDEO_PROCESSOR_MAPPING_NAMES)}"
)
@staticmethod
def register(
config_class,
video_processor_class,
exist_ok=False,
):
"""
Register a new video processor for this class.
Args:
config_class ([`PretrainedConfig`]):
The configuration corresponding to the model to register.
video_processor_class ([`BaseVideoProcessor`]):
The video processor to register.
"""
VIDEO_PROCESSOR_MAPPING.register(config_class, video_processor_class, exist_ok=exist_ok)
__all__ = ["VIDEO_PROCESSOR_MAPPING", "AutoVideoProcessor"]
| transformers/src/transformers/models/auto/video_processing_auto.py/0 | {
"file_path": "transformers/src/transformers/models/auto/video_processing_auto.py",
"repo_id": "transformers",
"token_count": 7425
} | 441 |
"""Convert Bark checkpoint."""
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
key = model_type
if use_small:
key += "_small"
return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hf_path, file_name):
os.makedirs(CACHE_DIR, exist_ok=True)
hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
if model_type == "text":
ModelClass = BarkSemanticModel
ConfigClass = BarkSemanticConfig
GenerationConfigClass = BarkSemanticGenerationConfig
elif model_type == "coarse":
ModelClass = BarkCoarseModel
ConfigClass = BarkCoarseConfig
GenerationConfigClass = BarkCoarseGenerationConfig
elif model_type == "fine":
ModelClass = BarkFineModel
ConfigClass = BarkFineConfig
GenerationConfigClass = BarkFineGenerationConfig
else:
raise NotImplementedError()
model_key = f"{model_type}_small" if use_small else model_type
model_info = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(ckpt_path):
logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
_download(model_info["repo_id"], model_info["file_name"])
checkpoint = torch.load(ckpt_path, map_location=device, weights_only=True)
# this is a hack
model_args = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
model_args["input_vocab_size"] = model_args["vocab_size"]
model_args["output_vocab_size"] = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
model_args["num_heads"] = model_args.pop("n_head")
model_args["hidden_size"] = model_args.pop("n_embd")
model_args["num_layers"] = model_args.pop("n_layer")
model_config = ConfigClass(**checkpoint["model_args"])
model = ModelClass(config=model_config)
model_generation_config = GenerationConfigClass()
model.generation_config = model_generation_config
state_dict = checkpoint["model"]
# fixup checkpoint
unwanted_prefix = "_orig_mod."
for k in state_dict:
if k.startswith(unwanted_prefix):
# replace part of the key with corresponding layer name in HF implementation
new_k = k[len(unwanted_prefix) :]
for old_layer_name, new_layer_name in new_layer_name_dict.items():
new_k = new_k.replace(old_layer_name, new_layer_name)
state_dict[new_k] = state_dict.pop(k)
extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
if len(extra_keys) != 0:
raise ValueError(f"extra keys found: {extra_keys}")
if len(missing_keys) != 0:
raise ValueError(f"missing keys: {missing_keys}")
model.load_state_dict(state_dict, strict=False)
n_params = model.num_parameters(exclude_embeddings=True)
val_loss = checkpoint["best_val_loss"].item()
logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
model.eval()
model.to(device)
del checkpoint, state_dict
return model
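# Illustrative sketch (added commentary, not part of the original script): how the key fix-up loop
# in `_load_model` maps Suno Bark parameter names onto the HF naming scheme. The sample key in the
# comment below is hypothetical; this helper is never called by the conversion script.
def _example_rename_bark_key(key: str) -> str:
    unwanted_prefix = "_orig_mod."
    if not key.startswith(unwanted_prefix):
        return key
    key = key[len(unwanted_prefix) :]
    for old_layer_name, new_layer_name in new_layer_name_dict.items():
        key = key.replace(old_layer_name, new_layer_name)
    # e.g. "_orig_mod.transformer.h.0.attn.c_attn.weight" -> "layers.0.attn.att_proj.weight"
    return key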
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
device = "cpu" # do conversion on cpu
ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)
# load bark initial model
bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)
if model_type == "text":
bark_model = bark_model["model"]
if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters")
# check if same output as the bark model
batch_size = 5
sequence_length = 10
if model_type in ["text", "coarse"]:
vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
output_old_model = bark_model(vec)[0]
output_new_model_total = model(vec)
# take last logits
output_new_model = output_new_model_total.logits[:, [-1], :]
else:
prediction_codebook_channel = 3
n_codes_total = 8
vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)
output_new_model_total = model(prediction_codebook_channel, vec)
output_old_model = bark_model(prediction_codebook_channel, vec)
output_new_model = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape")
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal")
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
semantic_path,
coarse_path,
fine_path,
append_text,
hub_path,
folder_path,
):
pytorch_dump_folder_path = os.path.join(folder_path, append_text)
semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")
semantic = BarkSemanticModel.from_pretrained(semantic_path)
coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
fineAcoustic = BarkFineModel.from_pretrained(fine_path)
codec = EncodecModel.from_pretrained("facebook/encodec_24khz")
bark_config = BarkConfig.from_sub_model_configs(
semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
)
bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
)
bark = BarkModel(bark_config)
bark.semantic = semantic
bark.coarse_acoustics = coarseAcoustic
bark.fine_acoustics = fineAcoustic
bark.codec_model = codec
bark.generation_config = bark_generation_config
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| transformers/src/transformers/models/bark/convert_suno_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/bark/convert_suno_to_hf.py",
"repo_id": "transformers",
"token_count": 3743
} | 442 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BiT checkpoints from the timm library."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
repo_id = "huggingface/label-files"
filename = "imagenet-1k-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
conv_layer = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
config = BitConfig(
conv_layer=conv_layer,
num_labels=1000,
id2label=id2label,
label2id=label2id,
)
return config
def rename_key(name):
if "stem.conv" in name:
name = name.replace("stem.conv", "bit.embedder.convolution")
if "blocks" in name:
name = name.replace("blocks", "layers")
if "head.fc" in name:
name = name.replace("head.fc", "classifier.1")
if name.startswith("norm"):
name = "bit." + name
if "bit" not in name and "classifier" not in name:
name = "bit.encoder." + name
return name
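# Illustrative sanity check (added commentary, not part of the original script): how `rename_key`
# maps a few timm-style parameter names onto the HF BiT naming scheme. The input keys are examples
# only; this helper is never called by the conversion script.
def _example_rename_keys():
    assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
    assert rename_key("stages.0.blocks.0.conv1.weight") == "bit.encoder.stages.0.layers.0.conv1.weight"
    assert rename_key("head.fc.weight") == "classifier.1.weight"
    assert rename_key("norm.weight") == "bit.norm.weight"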
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
"""
Copy/paste/tweak model's weights to our BiT structure.
"""
# define default BiT configuration
config = get_config(model_name)
# load original model from timm
timm_model = create_model(model_name, pretrained=True)
timm_model.eval()
# load state_dict of original model
state_dict = timm_model.state_dict()
for key in state_dict.copy():
val = state_dict.pop(key)
state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
# load HuggingFace model
model = BitForImageClassification(config)
model.eval()
model.load_state_dict(state_dict)
# create image processor
transform = create_transform(**resolve_data_config({}, model=timm_model))
timm_transforms = transform.transforms
pillow_resamplings = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
processor = BitImageProcessor(
do_resize=True,
size={"shortest_edge": timm_transforms[0].size},
resample=pillow_resamplings[timm_transforms[0].interpolation.value],
do_center_crop=True,
crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
do_normalize=True,
image_mean=timm_transforms[-1].mean.tolist(),
image_std=timm_transforms[-1].std.tolist(),
)
image = prepare_img()
timm_pixel_values = transform(image).unsqueeze(0)
pixel_values = processor(image, return_tensors="pt").pixel_values
# verify pixel values
assert torch.allclose(timm_pixel_values, pixel_values)
# verify logits
with torch.no_grad():
outputs = model(pixel_values)
logits = outputs.logits
print("Logits:", logits[0, :3])
print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
timm_logits = timm_model(pixel_values)
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model {model_name} and processor to the hub")
model.push_to_hub(f"ybelkada/{model_name}")
processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| transformers/src/transformers/models/bit/convert_bit_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/bit/convert_bit_to_pytorch.py",
"repo_id": "transformers",
"token_count": 2307
} | 443 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Blip.
"""
from typing import Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
class BlipProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": False,
"return_length": False,
"verbose": True,
},
"images_kwargs": {},
}
class BlipProcessor(ProcessorMixin):
r"""
Constructs a BLIP processor which wraps a BERT tokenizer and BLIP image processor into a single processor.
[`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`BertTokenizerFast`]. See the
docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information.
Args:
image_processor (`BlipImageProcessor`):
An instance of [`BlipImageProcessor`]. The image processor is a required input.
tokenizer (`BertTokenizerFast`):
            An instance of [`BertTokenizerFast`]. The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = ("BlipImageProcessor", "BlipImageProcessorFast")
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, image_processor, tokenizer, **kwargs):
tokenizer.return_token_type_ids = False
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(
self,
images: ImageInput = None,
text: Optional[Union[str, list[str], TextInput, PreTokenizedInput]] = None,
audio=None,
videos=None,
**kwargs: Unpack[BlipProcessorKwargs],
) -> BatchEncoding:
"""
        This method uses [`BlipImageProcessor.__call__`] to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
Args:
images (`ImageInput`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
"""
if images is None and text is None:
raise ValueError("You have to specify either images or text.")
text_encoding = None
# add pixel_values encoding. If we also have text_encoding, update image encoding and return it.
# else, return the text encoding.
output_kwargs = self._merge_kwargs(
BlipProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if text is not None:
text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
if images is not None:
encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"])
if text_encoding is not None:
encoding_image_processor.update(text_encoding)
return encoding_image_processor
return text_encoding
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
tokenizer_input_names = [name for name in tokenizer_input_names if name != "token_type_ids"]
return tokenizer_input_names + image_processor_input_names
__all__ = ["BlipProcessor"]
| transformers/src/transformers/models/blip/processing_blip.py/0 | {
"file_path": "transformers/src/transformers/models/blip/processing_blip.py",
"repo_id": "transformers",
"token_count": 2113
} | 444 |
# coding=utf-8
# Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BridgeTower Model"""
import math
from collections import OrderedDict
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN, QuickGELUActivation
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
ModelOutput,
SequenceClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, apply_chunking_to_forward
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import auto_docstring, logging, torch_int
from ...utils.deprecation import deprecate_kwarg
from .configuration_bridgetower import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig
logger = logging.get_logger(__name__)
_TOKENIZER_FOR_DOC = "RobertaTokenizer"
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`BridgeTowerModel`].
"""
)
class BridgeTowerModelOutput(ModelOutput):
r"""
text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_size)`):
Sequence of hidden-states at the text output of the last layer of the model.
image_features (`torch.FloatTensor` of shape `(batch_size, image_sequence_length, hidden_size)`):
Sequence of hidden-states at the image output of the last layer of the model.
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size x 2)`):
Concatenation of last layer hidden-state of the first token of the text and image sequence (classification
token), respectively, after further processing through layers used for auxiliary pretraining tasks.
"""
text_features: Optional[torch.FloatTensor] = None
image_features: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
    Output type of [`BridgeTowerForContrastiveLearning`]
"""
)
class BridgeTowerContrastiveOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
Image-text contrastive loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
    text_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
        The text embeddings obtained by applying the projection layer to the pooler_output.
    image_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
        The image embeddings obtained by applying the projection layer to the pooler_output.
    cross_embeds (`torch.FloatTensor`, *optional*, returned when model is initialized with `with_projection=True`):
        The text-image cross-modal embeddings obtained by applying the projection layer to the pooler_output.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
text_embeds: Optional[tuple[torch.FloatTensor]] = None
image_embeds: Optional[tuple[torch.FloatTensor]] = None
cross_embeds: Optional[tuple[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
class BridgeTowerResidualAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.attn = nn.MultiheadAttention(config.hidden_size, config.hidden_size // 64)
self.ln_1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.mlp = nn.ModuleDict(
OrderedDict(
[
("c_fc", nn.Linear(config.hidden_size, config.hidden_size * 4)),
("gelu", QuickGELUActivation()),
("c_proj", nn.Linear(config.hidden_size * 4, config.hidden_size)),
]
)
)
self.ln_2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attn_mask = None
def attention(self, hidden_state: torch.Tensor, attention_mask: torch.Tensor):
if attention_mask is not None:
attention_mask = attention_mask.to(dtype=torch.bool, device=hidden_state.device)
self.attn_mask = (
self.attn_mask.to(dtype=hidden_state.dtype, device=hidden_state.device)
if self.attn_mask is not None
else None
)
return self.attn(
hidden_state,
hidden_state,
hidden_state,
need_weights=False,
attn_mask=self.attn_mask,
key_padding_mask=attention_mask,
)[0]
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
residual_state = hidden_state + self.attention(self.ln_1(hidden_state), attention_mask)
hidden_state = self.ln_2(residual_state)
for layer in self.mlp.values():
hidden_state = layer(hidden_state)
hidden_state = residual_state + hidden_state
return hidden_state
class BridgeTowerTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.hidden_size = config.hidden_size
self.num_hidden_layers = config.num_hidden_layers
if config.remove_last_layer:
self.resblocks = nn.ModuleList(
[BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers - 1)]
)
else:
self.resblocks = nn.ModuleList(
[BridgeTowerResidualAttention(config) for _ in range(self.num_hidden_layers)]
)
self.stop_gradient = config.stop_gradient
def forward(self, hidden_state: torch.Tensor, attention_mask: Optional[torch.Tensor] = None):
hidden_states = []
for block in self.resblocks:
hidden_state = block(hidden_state, attention_mask)
if self.stop_gradient:
hidden_states.append(hidden_state.detach())
else:
hidden_states.append(hidden_state)
return hidden_states
# Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->BridgeTower
class BridgeTowerVisionEmbeddings(nn.Module):
def __init__(self, config: BridgeTowerVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
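    # Worked example for `interpolate_pos_encoding` above (added commentary, not part of the
    # original file): with patch_size=16 and a 224x224 pretraining resolution (illustrative numbers),
    # there are (224 // 16) ** 2 = 196 patch positions; a 320x320 input yields (320 // 16) ** 2 = 400
    # patches, so the 14x14 grid of position embeddings is bicubically resized to 20x20 before being
    # concatenated with the class position embedding.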
def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
batch_size, _, height, width = pixel_values.shape
if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size):
raise ValueError(
f"Input image size ({height}*{width}) doesn't match model ({self.image_size}*{self.image_size})."
)
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
if interpolate_pos_encoding:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
else:
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
class BridgeTowerVisionTransformer(nn.Module):
def __init__(self, config):
super().__init__()
self.embeddings = BridgeTowerVisionEmbeddings(config)
self.ln_pre = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.transformer = BridgeTowerTransformer(config)
self.ln_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.share_layernorm = config.share_layernorm
if not config.share_layernorm:
self.ln_separate = nn.ModuleList(
[nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) for _ in range(config.num_hidden_layers)]
)
def forward(
self,
pixel_values: torch.Tensor,
attention_mask,
interpolate_pos_encoding: bool = False,
):
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding)
hidden_states = self.ln_pre(hidden_states)
# NLD -> LND
hidden_states = hidden_states.permute(1, 0, 2)
hidden_states = self.transformer(hidden_states, attention_mask)
        # shape = [num_hidden_layers, seq_len, batch_size, hidden_size]
hidden_states = torch.stack(hidden_states, dim=0)
        # shape = [num_hidden_layers, batch_size, seq_len, hidden_size]
hidden_states = hidden_states.permute(0, 2, 1, 3)
if self.share_layernorm:
hidden_states = self.ln_post(hidden_states)
else:
hidden_states_stack = []
for hidden_states, ln in zip(hidden_states, self.ln_separate):
hidden_states = ln(hidden_states)
hidden_states_stack.append(hidden_states)
            # shape = [num_hidden_layers, batch_size, seq_len, hidden_size]
hidden_states = torch.stack(hidden_states_stack, dim=0)
return hidden_states
def forward_pre(
self,
pixel_values: torch.Tensor,
interpolate_pos_encoding: bool = False,
):
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.ln_pre(hidden_states)
# NLD -> LND
hidden_states = hidden_states.permute(1, 0, 2)
return hidden_states
def forward_post(self, hidden_state: torch.Tensor):
visual_output_post = hidden_state.permute(1, 0, 2)
visual_output_post = self.ln_post(visual_output_post)
return visual_output_post
class BridgeTowerLinkTower(nn.Module):
def __init__(self, config):
super().__init__()
self.link_tower_type = config.link_tower_type
self.hidden_size = config.hidden_size
if config.link_tower_type in ["add", "scaled_add", "interpolate"]:
if config.link_tower_type == "scaled_add":
self.scaled_factor = nn.Parameter(torch.tensor(1.0))
elif config.link_tower_type == "interpolate":
self.beta = nn.Parameter(torch.tensor(0.5))
self.LayerNorm = nn.LayerNorm(self.hidden_size, eps=config.layer_norm_eps)
else:
raise NotImplementedError(f"link_tower_type {config.link_tower_type} is not implemented")
def forward(self, hidden_states, cross_modal_hidden_states, attention_mask):
if self.link_tower_type == "add":
return self.LayerNorm(hidden_states + cross_modal_hidden_states)
elif self.link_tower_type == "scaled_add":
return self.LayerNorm(hidden_states * self.scaled_factor + cross_modal_hidden_states)
elif self.link_tower_type == "interpolate":
return self.LayerNorm(hidden_states * (1 - self.beta) + cross_modal_hidden_states * self.beta)
else:
raise NotImplementedError(f"link_tower_type {self.link_tower_type} is not implemented")
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BridgeTower
class BridgeTowerSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BridgeTower
class BridgeTowerIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BridgeTower
class BridgeTowerOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->BridgeTower
class BridgeTowerPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->BridgeTower
class BridgeTowerSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
self.layer_idx = layer_idx
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor]:
batch_size, seq_length, _ = hidden_states.shape
query_layer = self.query(hidden_states)
query_layer = query_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(
1, 2
)
is_cross_attention = encoder_hidden_states is not None
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_layer from cache
curr_past_key_value = past_key_values.cross_attention_cache
else:
curr_past_key_value = past_key_values.self_attention_cache
else:
curr_past_key_value = past_key_values
current_states = encoder_hidden_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_layer = curr_past_key_value.layers[self.layer_idx].keys
value_layer = curr_past_key_value.layers[self.layer_idx].values
else:
key_layer = self.key(current_states)
key_layer = key_layer.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(
1, 2
)
value_layer = self.value(current_states)
value_layer = value_layer.view(
batch_size, -1, self.num_attention_heads, self.attention_head_size
).transpose(1, 2)
if past_key_values is not None:
# save all key/value_layer to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_layer, value_layer = curr_past_key_value.update(
key_layer, value_layer, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention:
past_key_values.is_updated[self.layer_idx] = True
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
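# Relative position embeddings: look up a learned embedding for every (query, key) position offset and add the
# resulting scores to the raw attention scores ("relative_key" uses query-side scores only, "relative_key_query"
# adds key-side scores as well).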
query_length, key_length = query_layer.shape[2], key_layer.shape[2]
if past_key_values is not None:
position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
-1, 1
)
else:
position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the BridgeTowerModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
return context_layer, attention_probs
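# Maps `config._attn_implementation` to an attention class; only the eager implementation is available here.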
BRIDGE_TOWER_SELF_ATTENTION_CLASSES = {
"eager": BridgeTowerSelfAttention,
}
# Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->BridgeTower,BERT->BRIDGE_TOWER
class BridgeTowerAttention(nn.Module):
def __init__(self, config, position_embedding_type=None, layer_idx=None):
super().__init__()
self.self = BRIDGE_TOWER_SELF_ATTENTION_CLASSES[config._attn_implementation](
config,
position_embedding_type=position_embedding_type,
layer_idx=layer_idx,
)
self.output = BridgeTowerSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BridgeTowerBertCrossLayer(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BridgeTowerAttention(config, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
self.crossattention = BridgeTowerAttention(config, layer_idx=layer_idx)
self.intermediate = BridgeTowerIntermediate(config)
self.output = BridgeTowerOutput(config)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states,
encoder_hidden_states,
attention_mask=None,
head_mask=None,
encoder_attention_mask=None,
past_key_values=None,
output_attentions=False,
cache_position=None,
):
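# Self-attention within the current modality, then cross-attention over the other modality's hidden states,
# followed by the chunked feed-forward block.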
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
head_mask=None,
output_attentions=output_attentions,
past_key_values=None,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
# add self attentions if we output attention weights
outputs = self_attention_outputs[1:]
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask=encoder_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = cross_attention_outputs[0]
# add cross attentions if we output attention weights
outputs = outputs + cross_attention_outputs[1:]
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BridgeTowerTextLayer(GradientCheckpointingLayer):
def __init__(self, config, layer_idx=None):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BridgeTowerAttention(config, layer_idx=layer_idx)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BridgeTowerAttention(config, position_embedding_type="absolute", layer_idx=layer_idx)
self.intermediate = BridgeTowerIntermediate(config)
self.output = BridgeTowerOutput(config)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: Optional[bool] = False,
cache_position: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor]:
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attention_outputs = self.attention(
hidden_states,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
past_key_values=past_key_values,
cache_position=cache_position,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
" by setting `config.add_cross_attention=True`"
)
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask=encoder_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
return (layer_output,) + outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
# Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->BridgeTowerText
class BridgeTowerTextEncoder(nn.Module):
def __init__(self, config, layer_idx=None):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BridgeTowerTextLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[tuple[tuple[torch.FloatTensor]]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if use_cache and self.config.is_decoder and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(), DynamicCache())
if use_cache and self.config.is_decoder and isinstance(past_key_values, tuple):
logger.warning_once(
"Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
"You should pass an instance of `EncoderDecoderCache` instead, e.g. "
"`past_key_values=EncoderDecoderCache.from_legacy_cache(past_key_values)`."
)
past_key_values = EncoderDecoderCache.from_legacy_cache(past_key_values)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
cache_position=cache_position,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
past_key_values,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
# Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->BridgeTowerText
class BridgeTowerTextEmbeddings(nn.Module):
"""
Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
"""
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
self.register_buffer(
"token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
)
# End copy
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# Set the token_type_ids to the registered buffer from the constructor, where it is all zeros. This usually occurs
# when token_type_ids are auto-generated. The registered buffer helps users trace the model without passing
# token_type_ids and solves issue #5664.
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
Args:
input_ids: torch.Tensor
Returns: torch.Tensor
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
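# Illustrative example: input_ids = [[5, 7, 1, 1]] with padding_idx = 1 gives mask [[1, 1, 0, 0]],
# masked cumulative sum [[1, 2, 0, 0]] and position ids [[2, 3, 1, 1]] (padding positions stay at padding_idx).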
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
@auto_docstring
class BridgeTowerPreTrainedModel(PreTrainedModel):
config: BridgeTowerConfig
base_model_prefix = "bridgetower"
supports_gradient_checkpointing = False
_no_split_modules = ["BridgeTowerSelfAttention", "BridgeTowerResidualAttention"]
_skip_keys_device_placement = "past_key_values"
def _init_weights(self, module: nn.Module):
std = self.config.initializer_factor
if isinstance(module, BridgeTowerVisionTransformer):
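# CLIP-style initialization for the vision tower: projection, fc and attention weights get a normal init whose
# std shrinks with model width and depth, scaled by `initializer_factor`.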
proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5)
attn_std = self.config.hidden_size**-0.5
fc_std = (2 * self.config.hidden_size) ** -0.5
for block in module.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std * std)
block.attn.in_proj_bias.data.zero_()
nn.init.normal_(block.attn.out_proj.weight, std=proj_std * std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std * std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std * std)
nn.init.normal_(module.embeddings.class_embedding, std=attn_std * std)
nn.init.normal_(module.embeddings.position_embedding.weight, std=attn_std * std)
elif isinstance(module, (nn.Linear, nn.Conv2d, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.05 * std)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, BridgeTowerForContrastiveLearning):
module.logit_scale.data.fill_(self.config.logit_scale_init_value)
if isinstance(module, (nn.Linear, BridgeTowerMLMHead)) and module.bias is not None:
module.bias.data.zero_()
class BridgeTowerVisionModel(BridgeTowerPreTrainedModel):
config: BridgeTowerVisionConfig
def __init__(self, config):
super().__init__(config)
self.visual = BridgeTowerVisionTransformer(config)
@property
def dtype(self):
return self.visual.embeddings.patch_embedding.weight.dtype
def forward(self, image, image_mask=None, interpolate_pos_encoding=False):
return self.visual(image.type(self.dtype), image_mask, interpolate_pos_encoding)
@auto_docstring(
custom_intro="""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in *Attention is
all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
`add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass.
.. _*Attention is all you need*: https://huggingface.co/papers/1706.03762
"""
)
class BridgeTowerTextModel(BridgeTowerPreTrainedModel):
config: BridgeTowerTextConfig
def __init__(self, config, add_pooling_layer=True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = BridgeTowerTextEmbeddings(config)
self.encoder = BridgeTowerTextEncoder(config)
self.pooler = BridgeTowerPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = 0
if past_key_values is not None:
past_key_values_length = (
past_key_values[0][0].shape[-2]
if not isinstance(past_key_values, Cache)
else past_key_values.get_seq_length()
)
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
The bare BridgeTower Model transformer outputting a BridgeTowerModelOutput object without any specific head on top.
"""
)
class BridgeTowerModel(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
vision_config = config.vision_config
text_config = config.text_config
if config.share_cross_modal_transformer_layers:
self.cross_modal_text_transform = nn.Linear(text_config.hidden_size, config.hidden_size)
self.cross_modal_image_transform = nn.Linear(vision_config.hidden_size, config.hidden_size)
else:
self.cross_modal_text_transform = nn.ModuleList(
[nn.Linear(text_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]
)
self.cross_modal_image_transform = nn.ModuleList(
[nn.Linear(vision_config.hidden_size, config.hidden_size) for _ in range(config.num_hidden_layers)]
)
self.token_type_embeddings = nn.Embedding(2, config.hidden_size)
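# Two token types tag the modality in the shared cross-modal space: 0 for text tokens and
# `image_token_type_idx` (1 by default) for image patches.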
self.vision_model = BridgeTowerVisionModel(vision_config)
self.text_model = BridgeTowerTextModel(text_config)
if not vision_config.share_layernorm and config.init_layernorm_from_vision_encoder:
for ln in self.vision_model.visual.cross_modal_ln_separate:
ln.weight.data = self.vision_model.visual.ln_post.weight.data
ln.bias.data = self.vision_model.visual.ln_post.bias.data
self.cross_modal_image_layers = nn.ModuleList(
[BridgeTowerBertCrossLayer(text_config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
self.cross_modal_text_layers = nn.ModuleList(
[BridgeTowerBertCrossLayer(text_config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
# Class token => Linear => Tanh
self.cross_modal_image_pooler = BridgeTowerPooler(config)
self.cross_modal_text_pooler = BridgeTowerPooler(config)
# Initialize BridgeTower Components
self.cross_modal_text_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.cross_modal_image_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.share_link_tower_layers:
self.cross_modal_text_link_tower = BridgeTowerLinkTower(config)
self.cross_modal_image_link_tower = BridgeTowerLinkTower(config)
else:
self.cross_modal_text_link_tower = nn.ModuleList(
[BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]
)
self.cross_modal_image_link_tower = nn.ModuleList(
[BridgeTowerLinkTower(config) for _ in range(config.num_hidden_layers - 1)]
)
self.post_init()
def get_input_embeddings(self):
return self.text_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.text_model.set_input_embeddings(value)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
image_token_type_idx: Optional[int] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
interpolate_pos_encoding: bool = False,
) -> Union[tuple[torch.Tensor], BridgeTowerModelOutput]:
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
image_token_type_idx (`int`, *optional*):
- The token type ids for images.
output_hidden_states (`bool`, *optional*):
If set to `True`, hidden states are returned as a list containing the hidden states of text, image, and
cross-modal components respectively. i.e. `(hidden_states_text, hidden_states_image,
hidden_states_cross_modal)` where each element is a list of the hidden states of the corresponding
modality. `hidden_states_text`/`hidden_states_image` are lists of tensors corresponding to unimodal hidden states
and `hidden_states_cross_modal` is a list of tuples containing `cross_modal_text_hidden_states` and
`cross_modal_image_hidden_states` of each bridge layer.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels are currently not supported.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerModel
>>> from PIL import Image
>>> import requests
>>> # prepare image and text
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text = "hello world"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
>>> model = BridgeTowerModel.from_pretrained("BridgeTower/bridgetower-base")
>>> inputs = processor(image, text, return_tensors="pt")
>>> outputs = model(**inputs)
>>> outputs.keys()
odict_keys(['text_features', 'image_features', 'pooler_output'])
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
all_hidden_states_text = () if output_hidden_states else None
all_hidden_states_image = () if output_hidden_states else None
all_hidden_states_cross = () if output_hidden_states else None
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if inputs_embeds is not None and input_ids is None:
raise NotImplementedError(
"BridgeTowerModel does not use `inputs_embeds`. Make sure to pass in `input_ids` instead."
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
image_token_type_idx = image_token_type_idx if image_token_type_idx else 1
input_shape = input_ids.size()
text_embeds = self.text_model.embeddings(input_ids=input_ids)
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
if attention_mask is None:
attention_mask = torch.ones(input_shape, dtype=torch.long, device=input_ids.device)
extend_text_masks = self.text_model.get_extended_attention_mask(attention_mask, input_shape).to(
input_ids.device
)
# The split_index determines how many layers of the uni-modal encoder are applied before the cross-modal encoder
split_index = len(self.text_model.encoder.layer) - self.config.num_hidden_layers + 1
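# For example, with a 12-layer text encoder and 6 cross-modal layers, split_index = 12 - 6 + 1 = 7: the first 7
# text/vision layers run uni-modally, and each remaining layer is bridged into one of the later cross-modal layers.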
# Run the first 'split_index' layers of the textual encoder
for layer in self.text_model.encoder.layer[:split_index]:
text_embeds = layer(text_embeds, extend_text_masks)[0]
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
if image_embeds is None:
image_embeds = self.vision_model.visual.forward_pre(
pixel_values.type(self.vision_model.dtype), interpolate_pos_encoding=interpolate_pos_encoding
)
else:
# Permute as BridgeTowerResidualAttention has batch_first=True
image_embeds = image_embeds.permute(1, 0, 2)
if output_hidden_states:
all_hidden_states_image += (image_embeds,)
# Run the first 'split_index' layers of the visual encoder
for block in self.vision_model.visual.transformer.resblocks[:split_index]:
image_embeds = block(image_embeds)
if output_hidden_states:
all_hidden_states_image += (image_embeds,)
image_embeds_with_ln = self.vision_model.visual.forward_post(image_embeds.type(self.vision_model.dtype))
# first layer is a special case because we don't have the output from the cross-encoder yet
cross_modal_text = self.cross_modal_text_transform(text_embeds)
text_token_type_embeddings = self.token_type_embeddings(
torch.zeros(1, dtype=torch.long, device=input_ids.device)
).expand_as(cross_modal_text)
cross_modal_text = self.cross_modal_text_layernorm(cross_modal_text + text_token_type_embeddings)
image_embeds_with_ln = self.cross_modal_image_transform(image_embeds_with_ln)
image_token_type_embeddings = self.token_type_embeddings(
torch.full((1,), image_token_type_idx, dtype=torch.long, device=input_ids.device)
).expand_as(image_embeds_with_ln)
image_embeds_with_ln = image_embeds_with_ln + image_token_type_embeddings
cross_modal_image = self.cross_modal_image_layernorm(image_embeds_with_ln)
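# Attention mask over the cross-modal image sequence: every position is attended to, so an all-ones mask is built
# regardless of the `pixel_mask` argument.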
pixel_mask = torch.ones(
(cross_modal_image.size(0), cross_modal_image.size(1)),
dtype=torch.long,
device=input_ids.device,
)
extend_image_masks = self.text_model.get_extended_attention_mask(pixel_mask, pixel_mask.size()).to(
input_ids.device
)
layer_outputs_text = self.cross_modal_text_layers[0](
cross_modal_text,
cross_modal_image,
attention_mask=extend_text_masks,
encoder_attention_mask=extend_image_masks,
output_attentions=output_attentions,
)
cross_text_features = layer_outputs_text[0]
layer_outputs_image = self.cross_modal_image_layers[0](
cross_modal_image,
cross_modal_text,
attention_mask=extend_image_masks,
encoder_attention_mask=extend_text_masks,
output_attentions=output_attentions,
)
cross_image_features = layer_outputs_image[0]
if output_hidden_states:
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
if output_attentions:
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
link_layer_index = 0
# Each of the top 6 layers of the visual and textual encoders ([split_index:]) is connected to each layer of
# the cross-modal encoder via bridge layers, which brings bottom-up alignment and fusion to the cross-modal encoder.
for i in range(split_index, len(self.text_model.encoder.layer)):
text_embeds = self.text_model.encoder.layer[i](text_embeds, extend_text_masks)[0]
image_embeds = self.vision_model.visual.transformer.resblocks[i](image_embeds).type(
self.vision_model.dtype
)
image_embeds_with_ln = (
self.cross_modal_image_transform(self.vision_model.visual.forward_post(image_embeds))
+ image_token_type_embeddings
)
text_link_tower = self.cross_modal_text_link_tower[link_layer_index]
image_link_tower = self.cross_modal_image_link_tower[link_layer_index]
# Bridge layers for textual and visual encoders
cross_text_features_ = text_link_tower(
self.cross_modal_text_transform(text_embeds) + text_token_type_embeddings,
cross_text_features,
extend_text_masks,
)
cross_image_features_ = image_link_tower(image_embeds_with_ln, cross_image_features, extend_image_masks)
# Cross-modal encoder via bridge layers of textual and visual encoders
layer_outputs_text = self.cross_modal_text_layers[link_layer_index + 1](
cross_text_features_,
cross_image_features_,
attention_mask=extend_text_masks,
encoder_attention_mask=extend_image_masks,
output_attentions=output_attentions,
)
cross_text_features = layer_outputs_text[0]
layer_outputs_image = self.cross_modal_image_layers[link_layer_index + 1](
cross_image_features_,
cross_text_features_,
attention_mask=extend_image_masks,
encoder_attention_mask=extend_text_masks,
output_attentions=output_attentions,
)
cross_image_features = layer_outputs_image[0]
link_layer_index += 1
if output_hidden_states:
all_hidden_states_text += (text_embeds,)
all_hidden_states_image += (image_embeds,)
all_hidden_states_cross += ((cross_text_features, cross_image_features),)
if output_attentions:
all_self_attentions += ((layer_outputs_text[1], layer_outputs_image[1]),)
# Concatenate the cls tokens of the text and image features to get the final representation
text_features, image_features = cross_text_features, cross_image_features
cls_features = self.get_cls_features(text_features, image_features)
if output_hidden_states:
all_hidden_states = (all_hidden_states_text, all_hidden_states_image, all_hidden_states_cross)
if not return_dict:
return tuple(
v
for v in [text_features, image_features, cls_features, all_hidden_states, all_self_attentions]
if v is not None
)
return BridgeTowerModelOutput(
text_features=text_features,
image_features=image_features,
pooler_output=cls_features,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
def get_cls_features(self, text_features, image_features):
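# Pool the first token of each cross-modal stream and concatenate, giving a (batch_size, 2 * hidden_size)
# representation consumed by the ITM and contrastive heads.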
cls_features_text = self.cross_modal_text_pooler(text_features)
cls_features_image = self.cross_modal_image_pooler(image_features)
return torch.cat([cls_features_text, cls_features_image], dim=-1)
# Copied from transformers.models.vilt.modeling_vilt.ViltPredictionHeadTransform with Vilt->BridgeTower
class BridgeTowerPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BridgeTowerMLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.config = config
self.transform = BridgeTowerPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.text_config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.text_config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
mlm_score = self.transform(x)
mlm_score = self.decoder(mlm_score) + self.bias
return mlm_score
class BridgeTowerITMHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
itm_score = self.fc(x)
return itm_score
@auto_docstring(
custom_intro="""
BridgeTower Model with a language modeling head on top as done during pretraining.
"""
)
class BridgeTowerForMaskedLM(BridgeTowerPreTrainedModel):
_tied_weights_keys = ["mlm_score.decoder.weight"]
def __init__(self, config):
super().__init__(config)
self.bridgetower = BridgeTowerModel(config)
self.mlm_score = BridgeTowerMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.mlm_score.decoder
def set_output_embeddings(self, new_embeddings):
self.mlm_score.decoder = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
) -> Union[MaskedLMOutput, tuple[torch.FloatTensor]]:
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000360943.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
>>> text = "a <mask> looking out of the window"
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> # prepare inputs
>>> encoding = processor(image, text, return_tensors="pt")
>>> # forward pass
>>> outputs = model(**encoding)
>>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist())
>>> print(results)
.a cat looking out of the window.
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bridgetower(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
mlm_logits = self.mlm_score(outputs.text_features if return_dict else outputs[0])
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
labels = labels.to(mlm_logits.device)
masked_lm_loss = loss_fct(mlm_logits.view(-1, self.config.text_config.vocab_size), labels.view(-1))
if not return_dict:
output = tuple(mlm_logits)
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=mlm_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
BridgeTower Model transformer with a classifier head on top (a linear layer on top of the final hidden state of the
[CLS] token) for image-to-text matching.
"""
)
class BridgeTowerForImageAndTextRetrieval(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bridgetower = BridgeTowerModel(config)
self.itm_score = BridgeTowerITMHead(config.hidden_size * 2)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[torch.LongTensor] = None,
) -> Union[SequenceClassifierOutput, tuple[torch.FloatTensor]]:
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
labels (`torch.LongTensor` of shape `(batch_size, 1)`, *optional*):
Labels for computing the image-text matching loss. 0 means the pairs don't match and 1 means they match.
The pairs with 0 will be skipped for calculation.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval
>>> import requests
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"]
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm")
>>> # forward pass
>>> scores = dict()
>>> for text in texts:
... # prepare inputs
... encoding = processor(image, text, return_tensors="pt")
... outputs = model(**encoding)
... scores[text] = outputs.logits[0, 1].item()
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bridgetower(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooler_output = outputs.pooler_output if return_dict else outputs[2]
logits = self.itm_score(pooler_output)
itm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
labels = labels.to(logits.device)
itm_loss = loss_fct(logits, labels)
if not return_dict:
output = tuple(logits)
return ((itm_loss,) + output) if itm_loss is not None else output
return SequenceClassifierOutput(
loss=itm_loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class BridgeTowerContrastiveHead(nn.Module):
def __init__(self, hidden_size, embed_size):
super().__init__()
self.fc = nn.Linear(hidden_size, embed_size)
def forward(self, x):
x = self.fc(x)
return x
@auto_docstring(
custom_intro="""
BridgeTower Model with an image-text contrastive head on top computing the image-text contrastive loss.
"""
)
class BridgeTowerForContrastiveLearning(BridgeTowerPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bridgetower = BridgeTowerModel(config)
self.itc_text_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size)
self.itc_image_head = BridgeTowerContrastiveHead(config.hidden_size, config.contrastive_hidden_size)
self.itc_cross_modal_head = BridgeTowerContrastiveHead(config.hidden_size * 2, config.contrastive_hidden_size)
self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
pixel_mask: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
image_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = True,
return_dict: Optional[bool] = None,
return_loss: Optional[bool] = None,
) -> Union[BridgeTowerContrastiveOutput, tuple[torch.FloatTensor]]:
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, num_patches, hidden_size)`, *optional*):
Optionally, instead of passing `pixel_values`, you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `pixel_values` into patch embeddings.
return_loss (`bool`, *optional*):
Whether or not to return the contrastive loss.
Examples:
```python
>>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning
>>> import requests
>>> from PIL import Image
>>> import torch
>>> image_urls = [
... "https://farm4.staticflickr.com/3395/3428278415_81c3e27f15_z.jpg",
... "http://images.cocodataset.org/val2017/000000039769.jpg",
... ]
>>> texts = ["two dogs in a car", "two cats sleeping on a couch"]
>>> images = [Image.open(requests.get(url, stream=True).raw) for url in image_urls]
>>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
>>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc")
>>> inputs = processor(images, texts, padding=True, return_tensors="pt")
>>> loss = model(**inputs, return_loss=True).loss
>>> inputs = processor(images, texts[::-1], padding=True, return_tensors="pt")
>>> loss_swapped = model(**inputs, return_loss=True).loss
>>> print("Loss", round(loss.item(), 4))
Loss 0.0019
>>> print("Loss with swapped images", round(loss_swapped.item(), 4))
Loss with swapped images 2.126
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bridgetower(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
pixel_values=pixel_values,
pixel_mask=pixel_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
image_embeds=image_embeds,
output_attentions=output_attentions,
output_hidden_states=True,
return_dict=return_dict,
)
pooler_output = outputs.pooler_output if return_dict else outputs[2]
hidden_states_txt, hidden_states_img, hidden_states_cross_modal = (
outputs.hidden_states if return_dict else outputs[3]
)
text_embeds = hidden_states_txt[-1]
image_embeds = hidden_states_img[-1]
image_embeds_with_ln = self.bridgetower.vision_model.visual.forward_post(image_embeds)
image_token_type_embeddings = self.bridgetower.token_type_embeddings(
torch.full((1,), 1, dtype=torch.long, device=self.bridgetower.token_type_embeddings.weight.device)
).expand_as(image_embeds_with_ln)
image_embeds = self.bridgetower.cross_modal_image_transform(image_embeds_with_ln) + image_token_type_embeddings
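# Re-apply the post-LayerNorm, cross-modal transform and image token type embedding so the image features match
# what the cross-modal encoder consumed in `BridgeTowerModel.forward`.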
# normalized features
text_embeds = nn.functional.normalize(self.itc_text_head(text_embeds[:, 0, :]), dim=-1, p=2)
image_embeds = nn.functional.normalize(self.itc_image_head(image_embeds[:, 0, :]), dim=-1, p=2).to(
device=text_embeds.device
)
cross_embeds = nn.functional.normalize(self.itc_cross_modal_head(pooler_output), dim=-1, p=2).to(
device=text_embeds.device
)
logits = torch.stack([text_embeds, image_embeds, cross_embeds], dim=-2)
logit_scale = self.logit_scale.exp().to(device=text_embeds.device)
logits_text_to_image = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
logits_text_to_cross = torch.matmul(text_embeds, cross_embeds.t()) * logit_scale
logits_image_to_cross = torch.matmul(image_embeds, cross_embeds.t()) * logit_scale
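# Matching (text, image, cross-modal) triples share a batch index, so each scaled similarity matrix is scored
# against the diagonal with cross-entropy and the three losses are averaged.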
itc_loss = None
if return_loss:
labels = torch.arange(len(logits), device=logits.device)
text_to_image_loss = nn.functional.cross_entropy(logits_text_to_image, labels)
text_to_cross_loss = nn.functional.cross_entropy(logits_text_to_cross, labels)
image_to_cross_loss = nn.functional.cross_entropy(logits_image_to_cross, labels)
itc_loss = (text_to_image_loss + text_to_cross_loss + image_to_cross_loss) / 3.0
if not return_dict:
output = (logits, text_embeds, image_embeds, cross_embeds) + outputs[3:]
return ((itc_loss,) + output) if itc_loss is not None else output
return BridgeTowerContrastiveOutput(
loss=itc_loss,
logits=logits,
text_embeds=text_embeds,
image_embeds=image_embeds,
cross_embeds=cross_embeds,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
| transformers/src/transformers/models/bridgetower/modeling_bridgetower.py/0 | {
"file_path": "transformers/src/transformers/models/bridgetower/modeling_bridgetower.py",
"repo_id": "transformers",
"token_count": 36405
} | 445 |
# coding=utf-8
# Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Chinese-CLIP."""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
convert_to_rgb,
get_resize_output_image_size,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
infer_channel_dimension_format,
is_scaled_image,
make_list_of_images,
to_numpy_array,
valid_images,
validate_preprocess_arguments,
)
from ...utils import TensorType, filter_out_non_signature_kwargs, is_vision_available, logging
if is_vision_available():
import PIL
from ...utils.import_utils import requires
logger = logging.get_logger(__name__)
@requires(backends=("vision",))
class ChineseCLIPImageProcessor(BaseImageProcessor):
r"""
Constructs a Chinese-CLIP image processor.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
`do_resize` in the `preprocess` method.
size (`dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
method.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
do_center_crop (`bool`, *optional*, defaults to `True`):
Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
`preprocess` method.
        crop_size (`dict[str, int]`, *optional*, defaults to `{"height": 224, "width": 224}`):
Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
method.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
the `preprocess` method.
rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
method.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
        image_mean (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `list[float]`, *optional*, defaults to `OPENAI_CLIP_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
do_convert_rgb (`bool`, *optional*, defaults to `True`):
Whether to convert the image to RGB.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BICUBIC,
do_center_crop: bool = True,
crop_size: Optional[dict[str, int]] = None,
do_rescale: bool = True,
rescale_factor: Union[int, float] = 1 / 255,
do_normalize: bool = True,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: bool = True,
**kwargs,
) -> None:
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 224}
size = get_size_dict(size, default_to_square=False)
crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
crop_size = get_size_dict(crop_size)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BICUBIC,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
                Resampling filter to use when resizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input
image.
"""
size = get_size_dict(size, default_to_square=False)
output_size = get_resize_output_image_size(
image, size=(size["height"], size["width"]), default_to_square=False, input_data_format=input_data_format
)
return resize(
image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[int] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_convert_rgb: Optional[bool] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> PIL.Image.Image:
"""
Preprocess an image or batch of images.
Args:
images (`ImageInput`):
Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
passing in images with pixel values between 0 and 1, set `do_rescale=False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
the longest edge resized to keep the input aspect ratio.
resample (`int`, *optional*, defaults to `self.resample`):
Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
has an effect if `do_resize` is set to `True`.
do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
Whether to center crop the image.
crop_size (`dict[str, int]`, *optional*, defaults to `self.crop_size`):
Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Rescale factor to rescale the image by if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
`True`.
do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
Whether to convert the image to RGB.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format for the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- Unset: Use the channel dimension format of the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size)
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = make_list_of_images(images)
if not valid_images(images):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray."
)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_center_crop=do_center_crop,
crop_size=crop_size,
do_resize=do_resize,
size=size,
resample=resample,
)
if do_convert_rgb:
images = [convert_to_rgb(image) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image) for image in images]
if do_rescale and is_scaled_image(images[0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(images[0])
all_images = []
for image in images:
if do_resize:
image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
if do_center_crop:
image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)
if do_rescale:
image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
if do_normalize:
image = self.normalize(
image=image, mean=image_mean, std=image_std, input_data_format=input_data_format
)
all_images.append(image)
images = [
to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
for image in all_images
]
data = {"pixel_values": images}
return BatchFeature(data=data, tensor_type=return_tensors)
__all__ = ["ChineseCLIPImageProcessor"]
| transformers/src/transformers/models/chinese_clip/image_processing_chinese_clip.py/0 | {
"file_path": "transformers/src/transformers/models/chinese_clip/image_processing_chinese_clip.py",
"repo_id": "transformers",
"token_count": 6520
} | 446 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""English Normalizer class for CLVP."""
import sys
if sys.version_info >= (3, 11):
# Atomic grouping support was only added to the core RE in Python 3.11
import re
else:
import regex as re
class EnglishNormalizer:
def __init__(self):
# List of (regular expression, replacement) pairs for abbreviations:
self._abbreviations = [
(re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
for x in [
("mrs", "misess"),
("mr", "mister"),
("dr", "doctor"),
("st", "saint"),
("co", "company"),
("jr", "junior"),
("maj", "major"),
("gen", "general"),
("drs", "doctors"),
("rev", "reverend"),
("lt", "lieutenant"),
("hon", "honorable"),
("sgt", "sergeant"),
("capt", "captain"),
("esq", "esquire"),
("ltd", "limited"),
("col", "colonel"),
("ft", "fort"),
]
]
self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
self.teens = [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]
self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
def number_to_words(self, num: int) -> str:
"""
        Converts numbers (`int`) to words (`str`).
        Please note that it only supports values up to `999_999_999_999_999_999`, i.e. "nine hundred ninety-nine
        quadrillion, nine hundred ninety-nine trillion, nine hundred ninety-nine billion, nine hundred ninety-nine
        million, nine hundred ninety-nine thousand, nine hundred ninety-nine".
"""
if num == 0:
return "zero"
elif num < 0:
return "minus " + self.number_to_words(abs(num))
elif num < 10:
return self.ones[num]
elif num < 20:
return self.teens[num - 10]
elif num < 100:
return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
elif num < 1000:
return (
self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
)
elif num < 1_000_000:
return (
self.number_to_words(num // 1000)
+ " thousand"
+ (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
)
elif num < 1_000_000_000:
return (
self.number_to_words(num // 1_000_000)
+ " million"
+ (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
)
elif num < 1_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000)
+ " billion"
+ (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
)
elif num < 1_000_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000_000)
+ " trillion"
+ (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
)
elif num < 1_000_000_000_000_000_000:
return (
self.number_to_words(num // 1_000_000_000_000_000)
+ " quadrillion"
+ (
", " + self.number_to_words(num % 1_000_000_000_000_000)
if num % 1_000_000_000_000_000 != 0
else ""
)
)
else:
return "number out of range"
def convert_to_ascii(self, text: str) -> str:
"""
Converts unicode to ascii
"""
return text.encode("ascii", "ignore").decode("utf-8")
def _expand_dollars(self, m: str) -> str:
"""
This method is used to expand numerical dollar values into spoken words.
"""
match = m.group(1)
parts = match.split(".")
if len(parts) > 2:
return match + " dollars" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = "dollar" if dollars == 1 else "dollars"
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = "dollar" if dollars == 1 else "dollars"
return "%s %s" % (dollars, dollar_unit)
elif cents:
cent_unit = "cent" if cents == 1 else "cents"
return "%s %s" % (cents, cent_unit)
else:
return "zero dollars"
def _remove_commas(self, m: str) -> str:
"""
This method is used to remove commas from sentences.
"""
return m.group(1).replace(",", "")
def _expand_decimal_point(self, m: str) -> str:
"""
This method is used to expand '.' into spoken word ' point '.
"""
return m.group(1).replace(".", " point ")
def _expand_ordinal(self, num: str) -> str:
"""
This method is used to expand ordinals such as '1st', '2nd' into spoken words.
"""
ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}
num = int(num.group(0)[:-2])
        if 10 <= num % 100 <= 20:
suffix = "th"
else:
suffix = ordinal_suffixes.get(num % 10, "th")
return self.number_to_words(num) + suffix
def _expand_number(self, m: str) -> str:
"""
This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
link :
https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
"""
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return "two thousand"
elif num > 2000 and num < 2010:
return "two thousand " + self.number_to_words(num % 100)
elif num % 100 == 0:
return self.number_to_words(num // 100) + " hundred"
else:
return self.number_to_words(num)
else:
return self.number_to_words(num)
def normalize_numbers(self, text: str) -> str:
"""
This method is used to normalize numbers within a text such as converting the numbers to words, removing
commas, etc.
"""
text = re.sub(r"([0-9][0-9,]+[0-9])", self._remove_commas, text)
text = re.sub(r"£([0-9,]*[0-9])", r"\1 pounds", text)
text = re.sub(r"\$([0-9.,]*[0-9])", self._expand_dollars, text)
text = re.sub(r"([0-9]++\.[0-9]+)", self._expand_decimal_point, text)
text = re.sub(r"[0-9]++(st|nd|rd|th)", self._expand_ordinal, text)
text = re.sub(r"[0-9]+", self._expand_number, text)
return text
def expand_abbreviations(self, text: str) -> str:
"""
Expands the abbreviate words.
"""
for regex, replacement in self._abbreviations:
text = re.sub(regex, replacement, text)
return text
def collapse_whitespace(self, text: str) -> str:
"""
Removes multiple whitespaces
"""
return re.sub(re.compile(r"\s+"), " ", text)
def __call__(self, text):
"""
Converts text to ascii, numbers / number-like quantities to their spelt-out counterparts and expands
abbreviations
"""
text = self.convert_to_ascii(text)
text = text.lower()
text = self.normalize_numbers(text)
text = self.expand_abbreviations(text)
text = self.collapse_whitespace(text)
text = text.replace('"', "")
return text
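# --- Illustrative usage sketch (added for clarity, not part of the original file) ---
# A small, hedged demo of the full normalization pipeline defined above; the
# expected output is derived by reading the rules and has not been verified
# against a specific release.
if __name__ == "__main__":
    _normalizer = EnglishNormalizer()
    # Abbreviations are expanded, currency amounts and bare numbers are spelt
    # out, quotes are stripped and whitespace is collapsed.
    print(_normalizer('Dr. Smith paid  $3.50 for 2 "books"'))
    # expected: roughly "doctor smith paid three dollars, fifty cents for two books"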
| transformers/src/transformers/models/clvp/number_normalizer.py/0 | {
"file_path": "transformers/src/transformers/models/clvp/number_normalizer.py",
"repo_id": "transformers",
"token_count": 4428
} | 447 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvBERT checkpoint."""
import argparse
from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
conf = ConvBertConfig.from_json_file(convbert_config_file)
model = ConvBertModel(conf)
model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
model.save_pretrained(pytorch_dump_path)
tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
tf_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--convbert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained ConvBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
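# --- Illustrative invocation (added for clarity, not part of the original file) ---
# The paths below are placeholders; point them at a real TF1 ConvBERT checkpoint,
# its config json and an output directory:
#
#   python convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py \
#       --tf_checkpoint_path /path/to/tf1_checkpoint \
#       --convbert_config_file /path/to/convbert_config.json \
#       --pytorch_dump_path /path/to/output_dir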
| transformers/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py/0 | {
"file_path": "transformers/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py",
"repo_id": "transformers",
"token_count": 749
} | 448 |
# coding=utf-8
# Copyright 2023 Meta Platforms, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ConvNextV2 model."""
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_convnextv2 import ConvNextV2Config
logger = logging.get_logger(__name__)
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
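# Numerical sketch of the scaling above (illustrative only): with drop_prob = 0.2,
# each sample in the batch is zeroed with probability 0.2 and the survivors are
# scaled by 1 / keep_prob = 1 / 0.8 = 1.25, so in expectation the output equals
# the input: E[output] = 0.8 * (input / 0.8) + 0.2 * 0 = input.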
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->ConvNextV2
class ConvNextV2DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class ConvNextV2GRN(nn.Module):
"""GRN (Global Response Normalization) layer"""
def __init__(self, dim: int):
super().__init__()
self.weight = nn.Parameter(torch.zeros(1, 1, 1, dim))
self.bias = nn.Parameter(torch.zeros(1, 1, 1, dim))
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
# Compute and normalize global spatial feature maps
global_features = torch.linalg.vector_norm(hidden_states, ord=2, dim=(1, 2), keepdim=True)
norm_features = global_features / (global_features.mean(dim=-1, keepdim=True) + 1e-6)
hidden_states = self.weight * (hidden_states * norm_features) + self.bias + hidden_states
return hidden_states
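# Reference formula for the GRN block above, for a channels_last input x of
# shape (batch, height, width, channels):
#   G(x)_c = || x[..., c] ||_2                    # L2 norm over the spatial dims
#   N(x)_c = G(x)_c / (mean_c G(x)_c + 1e-6)      # divisive normalization across channels
#   out    = weight * (x * N(x)) + bias + x       # learnable affine + residual connection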
# Copied from transformers.models.convnext.modeling_convnext.ConvNextLayerNorm with ConvNext->ConvNextV2
class ConvNextV2LayerNorm(nn.Module):
r"""LayerNorm that supports two data formats: channels_last (default) or channels_first.
The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height,
width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width).
"""
def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
super().__init__()
self.weight = nn.Parameter(torch.ones(normalized_shape))
self.bias = nn.Parameter(torch.zeros(normalized_shape))
self.eps = eps
self.data_format = data_format
if self.data_format not in ["channels_last", "channels_first"]:
raise NotImplementedError(f"Unsupported data format: {self.data_format}")
self.normalized_shape = (normalized_shape,)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.data_format == "channels_last":
x = torch.nn.functional.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
elif self.data_format == "channels_first":
input_dtype = x.dtype
x = x.float()
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = x.to(dtype=input_dtype)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
# Copied from transformers.models.convnext.modeling_convnext.ConvNextEmbeddings with ConvNext->ConvNextV2
class ConvNextV2Embeddings(nn.Module):
"""This class is comparable to (and inspired by) the SwinEmbeddings class
found in src/transformers/models/swin/modeling_swin.py.
"""
def __init__(self, config):
super().__init__()
self.patch_embeddings = nn.Conv2d(
config.num_channels, config.hidden_sizes[0], kernel_size=config.patch_size, stride=config.patch_size
)
self.layernorm = ConvNextV2LayerNorm(config.hidden_sizes[0], eps=1e-6, data_format="channels_first")
self.num_channels = config.num_channels
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.patch_embeddings(pixel_values)
embeddings = self.layernorm(embeddings)
return embeddings
class ConvNextV2Layer(nn.Module):
"""This corresponds to the `Block` class in the original implementation.
    There are two equivalent implementations: (1) [DwConv, LayerNorm (channels_first), Conv, GELU, 1x1 Conv]; all in
    (N, C, H, W); (2) [DwConv, Permute to (N, H, W, C), LayerNorm (channels_last), Linear, GELU, Linear]; Permute back
The authors used (2) as they find it slightly faster in PyTorch.
Args:
config ([`ConvNextV2Config`]): Model configuration class.
dim (`int`): Number of input channels.
drop_path (`float`): Stochastic depth rate. Default: 0.0.
"""
def __init__(self, config, dim, drop_path=0):
super().__init__()
# depthwise conv
self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
self.layernorm = ConvNextV2LayerNorm(dim, eps=1e-6)
# pointwise/1x1 convs, implemented with linear layers
self.pwconv1 = nn.Linear(dim, 4 * dim)
self.act = ACT2FN[config.hidden_act]
self.grn = ConvNextV2GRN(4 * dim)
self.pwconv2 = nn.Linear(4 * dim, dim)
self.drop_path = ConvNextV2DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
input = hidden_states
x = self.dwconv(hidden_states)
# (batch_size, num_channels, height, width) -> (batch_size, height, width, num_channels)
x = x.permute(0, 2, 3, 1)
x = self.layernorm(x)
x = self.pwconv1(x)
x = self.act(x)
x = self.grn(x)
x = self.pwconv2(x)
# (batch_size, height, width, num_channels) -> (batch_size, num_channels, height, width)
x = x.permute(0, 3, 1, 2)
x = input + self.drop_path(x)
return x
# Copied from transformers.models.convnext.modeling_convnext.ConvNextStage with ConvNeXT->ConvNeXTV2, ConvNext->ConvNextV2
class ConvNextV2Stage(nn.Module):
"""ConvNeXTV2 stage, consisting of an optional downsampling layer + multiple residual blocks.
Args:
config ([`ConvNextV2Config`]): Model configuration class.
in_channels (`int`): Number of input channels.
out_channels (`int`): Number of output channels.
depth (`int`): Number of residual blocks.
drop_path_rates(`list[float]`): Stochastic depth rates for each layer.
"""
def __init__(self, config, in_channels, out_channels, kernel_size=2, stride=2, depth=2, drop_path_rates=None):
super().__init__()
if in_channels != out_channels or stride > 1:
self.downsampling_layer = nn.Sequential(
ConvNextV2LayerNorm(in_channels, eps=1e-6, data_format="channels_first"),
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride),
)
else:
self.downsampling_layer = nn.Identity()
drop_path_rates = drop_path_rates or [0.0] * depth
self.layers = nn.Sequential(
*[ConvNextV2Layer(config, dim=out_channels, drop_path=drop_path_rates[j]) for j in range(depth)]
)
def forward(self, hidden_states: torch.FloatTensor) -> torch.Tensor:
hidden_states = self.downsampling_layer(hidden_states)
hidden_states = self.layers(hidden_states)
return hidden_states
# Copied from transformers.models.convnext.modeling_convnext.ConvNextEncoder with ConvNext->ConvNextV2
class ConvNextV2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.stages = nn.ModuleList()
drop_path_rates = [
x.tolist()
for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu").split(config.depths)
]
prev_chs = config.hidden_sizes[0]
for i in range(config.num_stages):
out_chs = config.hidden_sizes[i]
stage = ConvNextV2Stage(
config,
in_channels=prev_chs,
out_channels=out_chs,
stride=2 if i > 0 else 1,
depth=config.depths[i],
drop_path_rates=drop_path_rates[i],
)
self.stages.append(stage)
prev_chs = out_chs
def forward(
self,
hidden_states: torch.FloatTensor,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, BaseModelOutputWithNoAttention]:
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.stages):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
)
@auto_docstring
class ConvNextV2PreTrainedModel(PreTrainedModel):
config: ConvNextV2Config
base_model_prefix = "convnextv2"
main_input_name = "pixel_values"
_no_split_modules = ["ConvNextV2Layer"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, ConvNextV2LayerNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, ConvNextV2GRN):
module.weight.data.zero_()
module.bias.data.zero_()
@auto_docstring
# Copied from transformers.models.convnext.modeling_convnext.ConvNextModel with CONVNEXT->CONVNEXTV2, ConvNext->ConvNextV2
class ConvNextV2Model(ConvNextV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = ConvNextV2Embeddings(config)
self.encoder = ConvNextV2Encoder(config)
# final layernorm layer
self.layernorm = nn.LayerNorm(config.hidden_sizes[-1], eps=config.layer_norm_eps)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
embedding_output,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
# global average pooling, (N, C, H, W) -> (N, C)
pooled_output = self.layernorm(last_hidden_state.mean([-2, -1]))
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
ConvNextV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
# Copied from transformers.models.convnext.modeling_convnext.ConvNextForImageClassification with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,convnext->convnextv2
class ConvNextV2ForImageClassification(ConvNextV2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.convnextv2 = ConvNextV2Model(config)
# Classifier head
self.classifier = (
nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.convnextv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
pooled_output = outputs.pooler_output if return_dict else outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
)
@auto_docstring(
custom_intro="""
ConvNeXT V2 backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
# Copied from transformers.models.convnext.modeling_convnext.ConvNextBackbone with CONVNEXT->CONVNEXTV2,ConvNext->ConvNextV2,facebook/convnext-tiny-224->facebook/convnextv2-tiny-1k-224
class ConvNextV2Backbone(ConvNextV2PreTrainedModel, BackboneMixin):
def __init__(self, config):
super().__init__(config)
super()._init_backbone(config)
self.embeddings = ConvNextV2Embeddings(config)
self.encoder = ConvNextV2Encoder(config)
self.num_features = [config.hidden_sizes[0]] + config.hidden_sizes
# Add layer norms to hidden states of out_features
hidden_states_norms = {}
for stage, num_channels in zip(self._out_features, self.channels):
hidden_states_norms[stage] = ConvNextV2LayerNorm(num_channels, data_format="channels_first")
self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
>>> model = AutoBackbone.from_pretrained("facebook/convnextv2-tiny-1k-224")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embeddings(pixel_values)
outputs = self.encoder(
embedding_output,
output_hidden_states=True,
return_dict=return_dict,
)
hidden_states = outputs.hidden_states if return_dict else outputs[1]
feature_maps = ()
for stage, hidden_state in zip(self.stage_names, hidden_states):
if stage in self.out_features:
hidden_state = self.hidden_states_norms[stage](hidden_state)
feature_maps += (hidden_state,)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = ["ConvNextV2ForImageClassification", "ConvNextV2Model", "ConvNextV2PreTrainedModel", "ConvNextV2Backbone"]
| transformers/src/transformers/models/convnextv2/modeling_convnextv2.py/0 | {
"file_path": "transformers/src/transformers/models/convnextv2/modeling_convnextv2.py",
"repo_id": "transformers",
"token_count": 8793
} | 449 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DAB-DETR model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import verify_backbone_config_arguments
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class DabDetrConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`DabDetrModel`]. It is used to instantiate
a DAB-DETR model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the DAB-DETR
[IDEA-Research/dab_detr-base](https://huggingface.co/IDEA-Research/dab_detr-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_timm_backbone (`bool`, *optional*, defaults to `True`):
Whether or not to use the `timm` library for the backbone. If set to `False`, will use the [`AutoBackbone`]
API.
backbone_config (`PretrainedConfig` or `dict`, *optional*):
The configuration of the backbone model. Only used in case `use_timm_backbone` is set to `False` in which
case it will default to `ResNetConfig()`.
backbone (`str`, *optional*, defaults to `"resnet50"`):
Name of backbone to use when `backbone_config` is `None`. If `use_pretrained_backbone` is `True`, this
will load the corresponding pretrained weights from the timm or transformers library. If `use_pretrained_backbone`
is `False`, this loads the backbone's config and uses that to initialize the backbone with random weights.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use pretrained weights for the backbone.
backbone_kwargs (`dict`, *optional*):
Keyword arguments to be passed to AutoBackbone when loading from a checkpoint
e.g. `{'out_indices': (0, 1, 2, 3)}`. Cannot be specified if `backbone_config` is set.
num_queries (`int`, *optional*, defaults to 300):
Number of object queries, i.e. detection slots. This is the maximal number of objects
[`DabDetrModel`] can detect in a single image. For COCO, we recommend 100 queries.
encoder_layers (`int`, *optional*, defaults to 6):
Number of encoder layers.
encoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in encoder.
encoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_layers (`int`, *optional*, defaults to 6):
Number of decoder layers.
decoder_ffn_dim (`int`, *optional*, defaults to 2048):
Dimension of the "intermediate" (often named feed-forward) layer in decoder.
decoder_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Indicates whether the transformer model architecture is an encoder-decoder or not.
activation_function (`str` or `function`, *optional*, defaults to `"prelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_size (`int`, *optional*, defaults to 256):
            General hidden dimension of the model, defining the size of components such as the encoder layers and the projection parameters in the decoder layers, among others.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
init_xavier_std (`float`, *optional*, defaults to 1.0):
The scaling factor used for the Xavier initialization gain in the HM Attention map module.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
dilation (`bool`, *optional*, defaults to `False`):
Whether to replace stride with dilation in the last convolutional block (DC5). Only supported when `use_timm_backbone` = `True`.
class_cost (`float`, *optional*, defaults to 2):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
cls_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the classification loss in the object detection loss function.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
focal_alpha (`float`, *optional*, defaults to 0.25):
Alpha parameter in the focal loss.
temperature_height (`int`, *optional*, defaults to 20):
Temperature parameter to tune the flatness of positional attention (HEIGHT)
temperature_width (`int`, *optional*, defaults to 20):
Temperature parameter to tune the flatness of positional attention (WIDTH)
query_dim (`int`, *optional*, defaults to 4):
Query dimension parameter represents the size of the output vector.
random_refpoints_xy (`bool`, *optional*, defaults to `False`):
Whether to fix the x and y coordinates of the anchor boxes with random initialization.
keep_query_pos (`bool`, *optional*, defaults to `False`):
Whether to concatenate the projected positional embedding from the object query into the original query (key) in every decoder layer.
num_patterns (`int`, *optional*, defaults to 0):
Number of pattern embeddings.
normalize_before (`bool`, *optional*, defaults to `False`):
Whether we use a normalization layer in the Encoder or not.
        sine_position_embedding_scale (`float`, *optional*, defaults to `None`):
Scaling factor applied to the normalized positional encodings.
initializer_bias_prior_prob (`float`, *optional*):
The prior probability used by the bias initializer to initialize biases for `enc_score_head` and `class_embed`.
            If `None`, `prior_prob` is computed as `prior_prob = 1 / (num_labels + 1)` while initializing model weights.
Examples:
```python
>>> from transformers import DabDetrConfig, DabDetrModel
>>> # Initializing a DAB-DETR IDEA-Research/dab_detr-base style configuration
>>> configuration = DabDetrConfig()
>>> # Initializing a model (with random weights) from the IDEA-Research/dab_detr-base style configuration
>>> model = DabDetrModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "dab-detr"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_attention_heads": "encoder_attention_heads",
}
def __init__(
self,
use_timm_backbone=True,
backbone_config=None,
backbone="resnet50",
use_pretrained_backbone=True,
backbone_kwargs=None,
num_queries=300,
encoder_layers=6,
encoder_ffn_dim=2048,
encoder_attention_heads=8,
decoder_layers=6,
decoder_ffn_dim=2048,
decoder_attention_heads=8,
is_encoder_decoder=True,
activation_function="prelu",
hidden_size=256,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
init_xavier_std=1.0,
auxiliary_loss=False,
dilation=False,
class_cost=2,
bbox_cost=5,
giou_cost=2,
cls_loss_coefficient=2,
bbox_loss_coefficient=5,
giou_loss_coefficient=2,
focal_alpha=0.25,
temperature_height=20,
temperature_width=20,
query_dim=4,
random_refpoints_xy=False,
keep_query_pos=False,
num_patterns=0,
normalize_before=False,
sine_position_embedding_scale=None,
initializer_bias_prior_prob=None,
**kwargs,
):
if query_dim != 4:
raise ValueError("The query dimensions has to be 4.")
# We default to values which were previously hard-coded in the model. This enables configurability of the config
# while keeping the default behavior the same.
if use_timm_backbone and backbone_kwargs is None:
backbone_kwargs = {}
if dilation:
backbone_kwargs["output_stride"] = 16
backbone_kwargs["out_indices"] = [1, 2, 3, 4]
backbone_kwargs["in_chans"] = 3 # num_channels
# Backwards compatibility
elif not use_timm_backbone and backbone in (None, "resnet50"):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
elif isinstance(backbone_config, dict):
backbone_model_type = backbone_config.get("model_type")
config_class = CONFIG_MAPPING[backbone_model_type]
backbone_config = config_class.from_dict(backbone_config)
backbone = None
# set timm attributes to None
dilation = None
verify_backbone_config_arguments(
use_timm_backbone=use_timm_backbone,
use_pretrained_backbone=use_pretrained_backbone,
backbone=backbone,
backbone_config=backbone_config,
backbone_kwargs=backbone_kwargs,
)
self.use_timm_backbone = use_timm_backbone
self.backbone_config = backbone_config
self.num_queries = num_queries
self.hidden_size = hidden_size
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.init_xavier_std = init_xavier_std
self.num_hidden_layers = encoder_layers
self.auxiliary_loss = auxiliary_loss
self.backbone = backbone
self.use_pretrained_backbone = use_pretrained_backbone
self.backbone_kwargs = backbone_kwargs
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.cls_loss_coefficient = cls_loss_coefficient
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.focal_alpha = focal_alpha
self.query_dim = query_dim
self.random_refpoints_xy = random_refpoints_xy
self.keep_query_pos = keep_query_pos
self.num_patterns = num_patterns
self.normalize_before = normalize_before
self.temperature_width = temperature_width
self.temperature_height = temperature_height
self.sine_position_embedding_scale = sine_position_embedding_scale
self.initializer_bias_prior_prob = initializer_bias_prior_prob
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def sub_configs(self):
return (
{"backbone_config": type(self.backbone_config)}
if getattr(self, "backbone_config", None) is not None
else {}
)
__all__ = ["DabDetrConfig"]
| transformers/src/transformers/models/dab_detr/configuration_dab_detr.py/0 | {
"file_path": "transformers/src/transformers/models/dab_detr/configuration_dab_detr.py",
"repo_id": "transformers",
"token_count": 5314
} | 450 |
# coding=utf-8
# Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 DeBERTa-v2 model."""
from __future__ import annotations
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
keras,
unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_deberta_v2 import DebertaV2Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DebertaV2Config"
_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-v2-xlarge"
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaContextPooler with Deberta->DebertaV2
class TFDebertaV2ContextPooler(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense")
self.dropout = TFDebertaV2StableDropout(config.pooler_dropout, name="dropout")
self.config = config
def call(self, hidden_states, training: bool = False):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
context_token = hidden_states[:, 0]
context_token = self.dropout(context_token, training=training)
pooled_output = self.dense(context_token)
pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
return pooled_output
@property
def output_dim(self) -> int:
return self.config.hidden_size
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build([None, None, self.config.pooler_hidden_size])
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaXSoftmax with Deberta->DebertaV2
class TFDebertaV2XSoftmax(keras.layers.Layer):
"""
Masked Softmax which is optimized for saving memory
Args:
input (`tf.Tensor`): The input tensor that will apply softmax.
        mask (`tf.Tensor`): The mask matrix where 0 indicates that the element will be ignored in the softmax calculation.
dim (int): The dimension that will apply softmax
"""
def __init__(self, axis=-1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
def call(self, inputs: tf.Tensor, mask: tf.Tensor):
rmask = tf.logical_not(tf.cast(mask, tf.bool))
output = tf.where(rmask, tf.cast(float("-inf"), dtype=self.compute_dtype), inputs)
output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)
output = tf.where(rmask, 0.0, output)
return output
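# Minimal numeric sketch of the masked softmax above (illustrative only):
#   xsoftmax = TFDebertaV2XSoftmax(axis=-1)
#   logits = tf.constant([[1.0, 2.0, 3.0]])
#   mask = tf.constant([[1, 1, 0]])      # the last position is masked out
#   xsoftmax(logits, mask)               # approx. [[0.269, 0.731, 0.0]]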
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaStableDropout with Deberta->DebertaV2
class TFDebertaV2StableDropout(keras.layers.Layer):
"""
Optimized dropout module for stabilizing the training
Args:
        drop_prob (float): the dropout probability
"""
def __init__(self, drop_prob, **kwargs):
super().__init__(**kwargs)
self.drop_prob = drop_prob
@tf.custom_gradient
def xdropout(self, inputs):
"""
        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/(1 - drop_prob).
"""
mask = tf.cast(
1
- tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
tf.bool,
)
scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)
if self.drop_prob > 0:
inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale
def grad(upstream):
if self.drop_prob > 0:
return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale
else:
return upstream
return inputs, grad
def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
if training:
return self.xdropout(inputs)
return inputs
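# Behavioral note for the dropout above (illustrative only): during training the
# surviving elements are scaled by 1 / (1 - drop_prob), e.g. drop_prob = 0.1 gives
# a scale of ~1.111, so the expected activation magnitude is unchanged; outside of
# training the inputs are returned untouched.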
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaSelfOutput with Deberta->DebertaV2
class TFDebertaV2SelfOutput(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(config.hidden_size, name="dense")
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
self.config = config
def call(self, hidden_states, input_tensor, training: bool = False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build([None, None, self.config.hidden_size])
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.hidden_size])
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaAttention with Deberta->DebertaV2
class TFDebertaV2Attention(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.self = TFDebertaV2DisentangledSelfAttention(config, name="self")
self.dense_output = TFDebertaV2SelfOutput(config, name="output")
self.config = config
def call(
self,
input_tensor: tf.Tensor,
attention_mask: tf.Tensor,
query_states: tf.Tensor | None = None,
relative_pos: tf.Tensor | None = None,
rel_embeddings: tf.Tensor | None = None,
output_attentions: bool = False,
training: bool = False,
) -> tuple[tf.Tensor]:
self_outputs = self.self(
hidden_states=input_tensor,
attention_mask=attention_mask,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
output_attentions=output_attentions,
training=training,
)
if query_states is None:
query_states = input_tensor
attention_output = self.dense_output(
hidden_states=self_outputs[0], input_tensor=query_states, training=training
)
output = (attention_output,) + self_outputs[1:]
return output
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "self", None) is not None:
with tf.name_scope(self.self.name):
self.self.build(None)
if getattr(self, "dense_output", None) is not None:
with tf.name_scope(self.dense_output.name):
self.dense_output.build(None)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaIntermediate with Deberta->DebertaV2
class TFDebertaV2Intermediate(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
self.config = config
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build([None, None, self.config.hidden_size])
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOutput with Deberta->DebertaV2
class TFDebertaV2Output(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.dense = keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
self.config = config
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build([None, None, self.config.intermediate_size])
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.hidden_size])
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLayer with Deberta->DebertaV2
class TFDebertaV2Layer(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.attention = TFDebertaV2Attention(config, name="attention")
self.intermediate = TFDebertaV2Intermediate(config, name="intermediate")
self.bert_output = TFDebertaV2Output(config, name="output")
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
query_states: tf.Tensor | None = None,
relative_pos: tf.Tensor | None = None,
rel_embeddings: tf.Tensor | None = None,
output_attentions: bool = False,
training: bool = False,
) -> tuple[tf.Tensor]:
attention_outputs = self.attention(
input_tensor=hidden_states,
attention_mask=attention_mask,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
output_attentions=output_attentions,
training=training,
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(hidden_states=attention_output)
layer_output = self.bert_output(
hidden_states=intermediate_output, input_tensor=attention_output, training=training
)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "attention", None) is not None:
with tf.name_scope(self.attention.name):
self.attention.build(None)
if getattr(self, "intermediate", None) is not None:
with tf.name_scope(self.intermediate.name):
self.intermediate.build(None)
if getattr(self, "bert_output", None) is not None:
with tf.name_scope(self.bert_output.name):
self.bert_output.build(None)
class TFDebertaV2ConvLayer(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.kernel_size = getattr(config, "conv_kernel_size", 3)
# groups = getattr(config, "conv_groups", 1)
self.conv_act = get_tf_activation(getattr(config, "conv_act", "tanh"))
self.padding = (self.kernel_size - 1) // 2
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
self.config = config
def build(self, input_shape=None):
if self.built:
return
self.built = True
with tf.name_scope("conv"):
self.conv_kernel = self.add_weight(
name="kernel",
shape=[self.kernel_size, self.config.hidden_size, self.config.hidden_size],
initializer=get_initializer(self.config.initializer_range),
)
self.conv_bias = self.add_weight(
name="bias", shape=[self.config.hidden_size], initializer=tf.zeros_initializer()
)
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.hidden_size])
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
def call(
self, hidden_states: tf.Tensor, residual_states: tf.Tensor, input_mask: tf.Tensor, training: bool = False
) -> tf.Tensor:
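        # The 1D convolution over the sequence is expressed as a 2D convolution by inserting a dummy
        # height dimension: hidden_states [batch, seq, hidden] becomes [batch, 1, seq, hidden] and the
        # kernel [kernel_size, hidden, hidden] becomes [1, kernel_size, hidden, hidden]. The explicit
        # padding of (kernel_size - 1) // 2 on the sequence axis keeps the sequence length unchanged
        # for the default odd kernel size.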
out = tf.nn.conv2d(
tf.expand_dims(hidden_states, 1),
tf.expand_dims(self.conv_kernel, 0),
strides=1,
padding=[[0, 0], [0, 0], [self.padding, self.padding], [0, 0]],
)
out = tf.squeeze(tf.nn.bias_add(out, self.conv_bias), 1)
rmask = tf.cast(1 - input_mask, tf.bool)
out = tf.where(tf.broadcast_to(tf.expand_dims(rmask, -1), shape_list(out)), 0.0, out)
out = self.dropout(out, training=training)
out = self.conv_act(out)
layer_norm_input = residual_states + out
output = self.LayerNorm(layer_norm_input)
if input_mask is None:
output_states = output
else:
if len(shape_list(input_mask)) != len(shape_list(layer_norm_input)):
if len(shape_list(input_mask)) == 4:
input_mask = tf.squeeze(tf.squeeze(input_mask, axis=1), axis=1)
input_mask = tf.cast(tf.expand_dims(input_mask, axis=2), dtype=self.compute_dtype)
output_states = output * input_mask
return output_states
class TFDebertaV2Encoder(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFDebertaV2Layer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
self.relative_attention = getattr(config, "relative_attention", False)
self.config = config
if self.relative_attention:
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.position_buckets = getattr(config, "position_buckets", -1)
self.pos_ebd_size = self.max_relative_positions * 2
if self.position_buckets > 0:
self.pos_ebd_size = self.position_buckets * 2
self.norm_rel_ebd = [x.strip() for x in getattr(config, "norm_rel_ebd", "none").lower().split("|")]
if "layer_norm" in self.norm_rel_ebd:
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.conv = TFDebertaV2ConvLayer(config, name="conv") if getattr(config, "conv_kernel_size", 0) > 0 else None
def build(self, input_shape=None):
if self.built:
return
self.built = True
if self.relative_attention:
self.rel_embeddings = self.add_weight(
name="rel_embeddings.weight",
shape=[self.pos_ebd_size, self.config.hidden_size],
initializer=get_initializer(self.config.initializer_range),
)
if getattr(self, "conv", None) is not None:
with tf.name_scope(self.conv.name):
self.conv.build(None)
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, self.config.hidden_size])
if getattr(self, "layer", None) is not None:
for layer in self.layer:
with tf.name_scope(layer.name):
layer.build(None)
def get_rel_embedding(self):
rel_embeddings = self.rel_embeddings if self.relative_attention else None
if rel_embeddings is not None and ("layer_norm" in self.norm_rel_ebd):
rel_embeddings = self.LayerNorm(rel_embeddings)
return rel_embeddings
def get_attention_mask(self, attention_mask):
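        # A 2D padding mask of shape [batch, seq] is expanded to a 4D attention mask of shape
        # [batch, 1, seq, seq] via an outer product of the mask with itself, so position i can attend
        # to position j only when both tokens are real (non-padding). A 3D mask of shape
        # [batch, seq, seq] only gains a broadcastable head dimension.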
if len(shape_list(attention_mask)) <= 2:
extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
attention_mask = tf.cast(attention_mask, tf.uint8)
elif len(shape_list(attention_mask)) == 3:
attention_mask = tf.expand_dims(attention_mask, 1)
return attention_mask
def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
if self.relative_attention and relative_pos is None:
q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
relative_pos = build_relative_position(
q,
shape_list(hidden_states)[-2],
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
)
return relative_pos
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
query_states: tf.Tensor | None = None,
relative_pos: tf.Tensor | None = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = True,
training: bool = False,
) -> TFBaseModelOutput | tuple[tf.Tensor]:
if len(shape_list(attention_mask)) <= 2:
input_mask = attention_mask
else:
input_mask = tf.cast(tf.math.reduce_sum(attention_mask, axis=-2) > 0, dtype=tf.uint8)
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
attention_mask = self.get_attention_mask(attention_mask)
relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
next_kv = hidden_states
rel_embeddings = self.get_rel_embedding()
output_states = next_kv
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
layer_outputs = layer_module(
hidden_states=next_kv,
attention_mask=attention_mask,
query_states=query_states,
relative_pos=relative_pos,
rel_embeddings=rel_embeddings,
output_attentions=output_attentions,
training=training,
)
output_states = layer_outputs[0]
if i == 0 and self.conv is not None:
output_states = self.conv(hidden_states, output_states, input_mask)
next_kv = output_states
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (output_states,)
if not return_dict:
return tuple(v for v in [output_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=output_states, hidden_states=all_hidden_states, attentions=all_attentions
)
def make_log_bucket_position(relative_pos, bucket_size, max_position):
sign = tf.math.sign(relative_pos)
mid = bucket_size // 2
abs_pos = tf.where((relative_pos < mid) & (relative_pos > -mid), mid - 1, tf.math.abs(relative_pos))
log_pos = tf.math.ceil(
tf.cast(tf.math.log(abs_pos / mid), tf.float32)
/ tf.cast(tf.math.log((max_position - 1) / mid), tf.float32)
* tf.cast(mid - 1, tf.float32) # in graph mode
) + tf.cast(mid, tf.float32)
bucket_pos = tf.cast(
tf.where(abs_pos <= mid, tf.cast(relative_pos, tf.float32), log_pos * tf.cast(sign, tf.float32)), tf.int32
)
return bucket_pos
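# Note on make_log_bucket_position: relative offsets with |offset| <= bucket_size // 2 are kept
# as-is, while larger offsets are compressed onto a logarithmic scale, so the number of distinct
# position buckets stays bounded by the bucket size even for very long sequences.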
def build_relative_position(query_size, key_size, bucket_size=-1, max_position=-1):
"""
Build relative position according to the query and key
    We assume the absolute position of the query \\(P_q\\) ranges over (0, query_size) and the absolute position of the
    key \\(P_k\\) ranges over (0, key_size). The relative position from query to key is \\(R_{q \\rightarrow k} = P_q -
    P_k\\)
Args:
query_size (int): the length of query
key_size (int): the length of key
bucket_size (int): the size of position bucket
max_position (int): the maximum allowed absolute position
Return:
`tf.Tensor`: A tensor with shape [1, query_size, key_size]
"""
q_ids = tf.range(query_size, dtype=tf.int32)
k_ids = tf.range(key_size, dtype=tf.int32)
rel_pos_ids = q_ids[:, None] - tf.tile(tf.expand_dims(k_ids, axis=0), [shape_list(q_ids)[0], 1])
if bucket_size > 0 and max_position > 0:
rel_pos_ids = make_log_bucket_position(rel_pos_ids, bucket_size, max_position)
rel_pos_ids = rel_pos_ids[:query_size, :]
rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
return tf.cast(rel_pos_ids, tf.int64)
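# Illustrative sketch (not part of the modeling code): build_relative_position returns, for every
# (query, key) pair, the signed offset query_index - key_index, optionally squashed into log
# buckets. The sizes below are made up for demonstration.
def _example_build_relative_position():
    rel = build_relative_position(query_size=4, key_size=4)
    # rel[0] == [[0, -1, -2, -3], [1, 0, -1, -2], [2, 1, 0, -1], [3, 2, 1, 0]]
    bucketed = build_relative_position(query_size=4, key_size=6, bucket_size=4, max_position=8)
    return rel, bucketed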
def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
shapes = [
shape_list(query_layer)[0],
shape_list(query_layer)[1],
shape_list(query_layer)[2],
shape_list(relative_pos)[-1],
]
return tf.broadcast_to(c2p_pos, shapes)
def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
shapes = [
shape_list(query_layer)[0],
shape_list(query_layer)[1],
shape_list(key_layer)[-2],
shape_list(key_layer)[-2],
]
return tf.broadcast_to(c2p_pos, shapes)
def pos_dynamic_expand(pos_index, p2c_att, key_layer):
shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
return tf.broadcast_to(pos_index, shapes)
def take_along_axis(x, indices):
# Only a valid port of np.take_along_axis when the gather axis is -1
    # Gathers and reshapes don't play well together on TPU -- see https://github.com/huggingface/transformers/issues/18239
if isinstance(tf.distribute.get_strategy(), tf.distribute.TPUStrategy):
# [B, S, P] -> [B, S, P, D]
one_hot_indices = tf.one_hot(indices, depth=x.shape[-1], dtype=x.dtype)
# if we ignore the first two dims, this is equivalent to multiplying a matrix (one hot) by a vector (x)
# grossly abusing notation: [B, S, P, D] . [B, S, D] = [B, S, P]
gathered = tf.einsum("ijkl,ijl->ijk", one_hot_indices, x)
# GPUs, on the other hand, prefer gathers instead of large one-hot+matmuls
else:
gathered = tf.gather(x, indices, batch_dims=2)
return gathered
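# Illustrative sketch (not part of the modeling code): take_along_axis gathers one value per index
# along the last axis, batch-wise. Outside of a TPU strategy it is exactly tf.gather with
# batch_dims=2; the tensors below are made up.
def _example_take_along_axis():
    x = tf.reshape(tf.range(2 * 3 * 4, dtype=tf.float32), [2, 3, 4])
    indices = tf.constant([[[0, 3], [1, 1], [2, 0]], [[3, 3], [0, 1], [1, 2]]])
    return take_along_axis(x, indices)  # shape [2, 3, 2]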
class TFDebertaV2DisentangledSelfAttention(keras.layers.Layer):
"""
Disentangled self-attention module
Parameters:
config (`DebertaV2Config`):
            A model config class instance with the configuration to build a new model. The schema is similar to
            *BertConfig*; for more details, please refer to [`DebertaV2Config`]
"""
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
_attention_head_size = config.hidden_size // config.num_attention_heads
self.attention_head_size = getattr(config, "attention_head_size", _attention_head_size)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query_proj = keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="query_proj",
use_bias=True,
)
self.key_proj = keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="key_proj",
use_bias=True,
)
self.value_proj = keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="value_proj",
use_bias=True,
)
self.share_att_key = getattr(config, "share_att_key", False)
self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
self.relative_attention = getattr(config, "relative_attention", False)
if self.relative_attention:
self.position_buckets = getattr(config, "position_buckets", -1)
self.max_relative_positions = getattr(config, "max_relative_positions", -1)
if self.max_relative_positions < 1:
self.max_relative_positions = config.max_position_embeddings
self.pos_ebd_size = self.max_relative_positions
if self.position_buckets > 0:
self.pos_ebd_size = self.position_buckets
self.pos_dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="pos_dropout")
if not self.share_att_key:
if "c2p" in self.pos_att_type:
self.pos_key_proj = keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="pos_proj",
use_bias=True,
)
if "p2c" in self.pos_att_type:
self.pos_query_proj = keras.layers.Dense(
self.all_head_size,
kernel_initializer=get_initializer(config.initializer_range),
name="pos_q_proj",
)
self.softmax = TFDebertaV2XSoftmax(axis=-1)
self.dropout = TFDebertaV2StableDropout(config.attention_probs_dropout_prob, name="dropout")
self.config = config
def transpose_for_scores(self, tensor: tf.Tensor, attention_heads: int) -> tf.Tensor:
tensor_shape = shape_list(tensor)
        # In graph mode, we can't reshape with -1 as the final dimension if the first dimension (batch size) is None
shape = tensor_shape[:-1] + [attention_heads, tensor_shape[-1] // attention_heads]
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=shape)
tensor = tf.transpose(tensor, perm=[0, 2, 1, 3])
x_shape = shape_list(tensor)
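        # Merge the batch and head dimensions: [batch_size, num_attention_heads, seq_length, head_size]
        # -> [batch_size * num_attention_heads, seq_length, head_size], so attention can be computed
        # with plain 3D matmuls.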
tensor = tf.reshape(tensor, shape=[-1, x_shape[-2], x_shape[-1]])
return tensor
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
query_states: tf.Tensor | None = None,
relative_pos: tf.Tensor | None = None,
rel_embeddings: tf.Tensor | None = None,
output_attentions: bool = False,
training: bool = False,
) -> tuple[tf.Tensor]:
"""
Call the module
Args:
hidden_states (`tf.Tensor`):
                Input states to the module, usually the output from the previous layer; it will be the Q, K and V in
                *Attention(Q,K,V)*
            attention_mask (`tf.Tensor`):
                An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
                sequence length, in which element [i,j] = *1* means the *i*-th token in the input can attend to the
                *j*-th token.
            output_attentions (`bool`, *optional*):
                Whether to return the attention matrix.
query_states (`tf.Tensor`, *optional*):
The *Q* state in *Attention(Q,K,V)*.
relative_pos (`tf.Tensor`):
The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
values ranging in [*-max_relative_positions*, *max_relative_positions*].
rel_embeddings (`tf.Tensor`):
The embedding of relative distances. It's a tensor of shape [\\(2 \\times
\\text{max_relative_positions}\\), *hidden_size*].
"""
if query_states is None:
query_states = hidden_states
query_layer = self.transpose_for_scores(self.query_proj(query_states), self.num_attention_heads)
key_layer = self.transpose_for_scores(self.key_proj(hidden_states), self.num_attention_heads)
value_layer = self.transpose_for_scores(self.value_proj(hidden_states), self.num_attention_heads)
rel_att = None
# Take the dot product between "query" and "key" to get the raw attention scores.
scale_factor = 1
if "c2p" in self.pos_att_type:
scale_factor += 1
if "p2c" in self.pos_att_type:
scale_factor += 1
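        # scale_factor counts the score components being summed (content-to-content plus any enabled
        # c2p / p2c term), so the scores are divided by sqrt(head_size * scale_factor) instead of the
        # usual sqrt(head_size).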
scale = tf.math.sqrt(tf.cast(shape_list(query_layer)[-1] * scale_factor, dtype=self.compute_dtype))
attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 2, 1]) / scale)
if self.relative_attention:
rel_embeddings = self.pos_dropout(rel_embeddings)
rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)
if rel_att is not None:
attention_scores = attention_scores + rel_att
attention_scores = tf.reshape(
attention_scores,
(-1, self.num_attention_heads, shape_list(attention_scores)[-2], shape_list(attention_scores)[-1]),
)
# bsz x height x length x dimension
attention_probs = self.softmax(attention_scores, attention_mask)
attention_probs = self.dropout(attention_probs, training=training)
context_layer = tf.matmul(
tf.reshape(attention_probs, [-1, shape_list(attention_probs)[-2], shape_list(attention_probs)[-1]]),
value_layer,
)
context_layer = tf.transpose(
tf.reshape(
context_layer,
[-1, self.num_attention_heads, shape_list(context_layer)[-2], shape_list(context_layer)[-1]],
),
[0, 2, 1, 3],
)
# Set the final dimension here explicitly.
# Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
# the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
# requires final input dimension to be defined
context_layer_shape = shape_list(context_layer)
new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
context_layer = tf.reshape(context_layer, new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
if relative_pos is None:
q = shape_list(query_layer)[-2]
relative_pos = build_relative_position(
q,
shape_list(key_layer)[-2],
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
)
shape_list_pos = shape_list(relative_pos)
if len(shape_list_pos) == 2:
relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
elif len(shape_list_pos) == 3:
relative_pos = tf.expand_dims(relative_pos, 1)
# bsz x height x query x key
elif len(shape_list_pos) != 4:
raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")
att_span = self.pos_ebd_size
rel_embeddings = tf.expand_dims(
rel_embeddings[self.pos_ebd_size - att_span : self.pos_ebd_size + att_span, :], 0
)
if self.share_att_key:
pos_query_layer = tf.tile(
self.transpose_for_scores(self.query_proj(rel_embeddings), self.num_attention_heads),
[shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
)
pos_key_layer = tf.tile(
self.transpose_for_scores(self.key_proj(rel_embeddings), self.num_attention_heads),
[shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
)
else:
if "c2p" in self.pos_att_type:
pos_key_layer = tf.tile(
self.transpose_for_scores(self.pos_key_proj(rel_embeddings), self.num_attention_heads),
[shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
) # .split(self.all_head_size, dim=-1)
if "p2c" in self.pos_att_type:
pos_query_layer = tf.tile(
self.transpose_for_scores(self.pos_query_proj(rel_embeddings), self.num_attention_heads),
[shape_list(query_layer)[0] // self.num_attention_heads, 1, 1],
) # .split(self.all_head_size, dim=-1)
score = 0
# content->position
if "c2p" in self.pos_att_type:
scale = tf.math.sqrt(tf.cast(shape_list(pos_key_layer)[-1] * scale_factor, dtype=self.compute_dtype))
c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 2, 1]))
c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
c2p_att = take_along_axis(
c2p_att,
tf.broadcast_to(
tf.squeeze(c2p_pos, 0),
[shape_list(query_layer)[0], shape_list(query_layer)[1], shape_list(relative_pos)[-1]],
),
)
score += c2p_att / scale
# position->content
if "p2c" in self.pos_att_type:
scale = tf.math.sqrt(tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=self.compute_dtype))
if shape_list(key_layer)[-2] != shape_list(query_layer)[-2]:
r_pos = build_relative_position(
shape_list(key_layer)[-2],
shape_list(key_layer)[-2],
bucket_size=self.position_buckets,
max_position=self.max_relative_positions,
)
r_pos = tf.expand_dims(r_pos, 0)
else:
r_pos = relative_pos
p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 2, 1]))
p2c_att = tf.transpose(
take_along_axis(
p2c_att,
tf.broadcast_to(
tf.squeeze(p2c_pos, 0),
[shape_list(query_layer)[0], shape_list(key_layer)[-2], shape_list(key_layer)[-2]],
),
),
[0, 2, 1],
)
score += p2c_att / scale
return score
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "query_proj", None) is not None:
with tf.name_scope(self.query_proj.name):
self.query_proj.build([None, None, self.config.hidden_size])
if getattr(self, "key_proj", None) is not None:
with tf.name_scope(self.key_proj.name):
self.key_proj.build([None, None, self.config.hidden_size])
if getattr(self, "value_proj", None) is not None:
with tf.name_scope(self.value_proj.name):
self.value_proj.build([None, None, self.config.hidden_size])
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
if getattr(self, "pos_dropout", None) is not None:
with tf.name_scope(self.pos_dropout.name):
self.pos_dropout.build(None)
if getattr(self, "pos_key_proj", None) is not None:
with tf.name_scope(self.pos_key_proj.name):
self.pos_key_proj.build([None, None, self.config.hidden_size])
if getattr(self, "pos_query_proj", None) is not None:
with tf.name_scope(self.pos_query_proj.name):
self.pos_query_proj.build([None, None, self.config.hidden_size])
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaEmbeddings with Deberta->DebertaV2
class TFDebertaV2Embeddings(keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
self.hidden_size = config.hidden_size
self.max_position_embeddings = config.max_position_embeddings
self.position_biased_input = getattr(config, "position_biased_input", True)
self.initializer_range = config.initializer_range
if self.embedding_size != config.hidden_size:
self.embed_proj = keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
name="embed_proj",
use_bias=False,
)
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = TFDebertaV2StableDropout(config.hidden_dropout_prob, name="dropout")
def build(self, input_shape=None):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.config.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
if self.config.type_vocab_size > 0:
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.config.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
else:
self.token_type_embeddings = None
with tf.name_scope("position_embeddings"):
if self.position_biased_input:
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.hidden_size],
initializer=get_initializer(self.initializer_range),
)
else:
self.position_embeddings = None
if self.built:
return
self.built = True
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.config.hidden_size])
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
if getattr(self, "embed_proj", None) is not None:
with tf.name_scope(self.embed_proj.name):
self.embed_proj.build([None, None, self.embedding_size])
def call(
self,
input_ids: tf.Tensor | None = None,
position_ids: tf.Tensor | None = None,
token_type_ids: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
mask: tf.Tensor | None = None,
training: bool = False,
) -> tf.Tensor:
"""
        Applies embedding based on the input tensors.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
raise ValueError("Need to provide either `input_ids` or `input_embeds`.")
if input_ids is not None:
check_embeddings_within_bounds(input_ids, self.config.vocab_size)
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
final_embeddings = inputs_embeds
if self.position_biased_input:
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
final_embeddings += position_embeds
if self.config.type_vocab_size > 0:
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings += token_type_embeds
if self.embedding_size != self.hidden_size:
final_embeddings = self.embed_proj(final_embeddings)
final_embeddings = self.LayerNorm(final_embeddings)
if mask is not None:
if len(shape_list(mask)) != len(shape_list(final_embeddings)):
if len(shape_list(mask)) == 4:
mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
mask = tf.cast(tf.expand_dims(mask, axis=2), dtype=self.compute_dtype)
final_embeddings = final_embeddings * mask
final_embeddings = self.dropout(final_embeddings, training=training)
return final_embeddings
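# Note on TFDebertaV2Embeddings: the final embedding is the word embedding, plus the position
# embedding only when `position_biased_input` is set, plus the token type embedding only when
# `type_vocab_size > 0`; it is then optionally projected from `embedding_size` to `hidden_size`,
# layer-normalized, multiplied by the attention mask and passed through the stable dropout.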
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPredictionHeadTransform with Deberta->DebertaV2
class TFDebertaV2PredictionHeadTransform(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
self.dense = keras.layers.Dense(
units=self.embedding_size,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.config = config
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "dense", None) is not None:
with tf.name_scope(self.dense.name):
self.dense.build([None, None, self.config.hidden_size])
if getattr(self, "LayerNorm", None) is not None:
with tf.name_scope(self.LayerNorm.name):
self.LayerNorm.build([None, None, self.embedding_size])
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaLMPredictionHead with Deberta->DebertaV2
class TFDebertaV2LMPredictionHead(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, input_embeddings: keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
self.transform = TFDebertaV2PredictionHeadTransform(config, name="transform")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape=None):
self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
if self.built:
return
self.built = True
if getattr(self, "transform", None) is not None:
with tf.name_scope(self.transform.name):
self.transform.build(None)
def get_output_embeddings(self) -> keras.layers.Layer:
return self.input_embeddings
def set_output_embeddings(self, value: tf.Variable):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self) -> dict[str, tf.Variable]:
return {"bias": self.bias}
def set_bias(self, value: tf.Variable):
self.bias = value["bias"]
self.config.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.transform(hidden_states=hidden_states)
seq_length = shape_list(hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaOnlyMLMHead with Deberta->DebertaV2
class TFDebertaV2OnlyMLMHead(keras.layers.Layer):
def __init__(self, config: DebertaV2Config, input_embeddings: keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.predictions = TFDebertaV2LMPredictionHead(config, input_embeddings, name="predictions")
def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
prediction_scores = self.predictions(hidden_states=sequence_output)
return prediction_scores
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "predictions", None) is not None:
with tf.name_scope(self.predictions.name):
self.predictions.build(None)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaMainLayer with Deberta->DebertaV2
class TFDebertaV2MainLayer(keras.layers.Layer):
config_class = DebertaV2Config
def __init__(self, config: DebertaV2Config, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embeddings = TFDebertaV2Embeddings(config, name="embeddings")
self.encoder = TFDebertaV2Encoder(config, name="encoder")
def get_input_embeddings(self) -> keras.layers.Layer:
return self.embeddings
def set_input_embeddings(self, value: tf.Variable):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
@unpack_inputs
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool = False,
) -> TFBaseModelOutput | tuple[tf.Tensor]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = tf.fill(dims=input_shape, value=1)
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
mask=attention_mask,
training=training,
)
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = encoder_outputs[0]
if not return_dict:
return (sequence_output,) + encoder_outputs[1:]
return TFBaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "embeddings", None) is not None:
with tf.name_scope(self.embeddings.name):
self.embeddings.build(None)
if getattr(self, "encoder", None) is not None:
with tf.name_scope(self.encoder.name):
self.encoder.build(None)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaPreTrainedModel with Deberta->DebertaV2
class TFDebertaV2PreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DebertaV2Config
base_model_prefix = "deberta"
DEBERTA_START_DOCSTRING = r"""
The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
Attention](https://huggingface.co/papers/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's built
on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and an enhanced mask decoder. With those two
improvements, it outperforms BERT/RoBERTa on a majority of tasks with 80GB of pretraining data.
This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Parameters:
config ([`DebertaV2Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
DEBERTA_INPUTS_DOCSTRING = r"""
Args:
        input_ids (`np.ndarray`, `tf.Tensor`, `list[tf.Tensor]`, `dict[str, tf.Tensor]` or `dict[str, np.ndarray]`, and each example must have the shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaModel with Deberta->DebertaV2
class TFDebertaV2Model(TFDebertaV2PreTrainedModel):
def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.deberta = TFDebertaV2MainLayer(config, name="deberta")
@unpack_inputs
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool | None = False,
) -> TFBaseModelOutput | tuple[tf.Tensor]:
outputs = self.deberta(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return outputs
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "deberta", None) is not None:
with tf.name_scope(self.deberta.name):
self.deberta.build(None)
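# Illustrative sketch (not part of the modeling code): running the bare model on dummy inputs.
# The tiny configuration values below are made up; in practice a pretrained checkpoint would be
# loaded with `TFDebertaV2Model.from_pretrained(...)` instead.
def _example_run_tf_deberta_v2_model():
    config = DebertaV2Config(
        vocab_size=128,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=64,
        max_position_embeddings=64,
    )
    model = TFDebertaV2Model(config)
    input_ids = tf.constant([[1, 2, 3, 4, 0, 0]])
    attention_mask = tf.constant([[1, 1, 1, 1, 0, 0]])
    outputs = model(input_ids, attention_mask=attention_mask)
    return outputs.last_hidden_state  # shape [1, 6, 32]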
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForMaskedLM with Deberta->DebertaV2
class TFDebertaV2ForMaskedLM(TFDebertaV2PreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
if config.is_decoder:
logger.warning(
"If you want to use `TFDebertaV2ForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.deberta = TFDebertaV2MainLayer(config, name="deberta")
self.mlm = TFDebertaV2OnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")
def get_lm_head(self) -> keras.layers.Layer:
return self.mlm.predictions
@unpack_inputs
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
labels: np.ndarray | tf.Tensor | None = None,
training: bool | None = False,
) -> TFMaskedLMOutput | tuple[tf.Tensor]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
outputs = self.deberta(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "deberta", None) is not None:
with tf.name_scope(self.deberta.name):
self.deberta.build(None)
if getattr(self, "mlm", None) is not None:
with tf.name_scope(self.mlm.name):
self.mlm.build(None)
@add_start_docstrings(
"""
DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForSequenceClassification with Deberta->DebertaV2
class TFDebertaV2ForSequenceClassification(TFDebertaV2PreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.deberta = TFDebertaV2MainLayer(config, name="deberta")
self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
drop_out = getattr(config, "cls_dropout", None)
drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
self.dropout = TFDebertaV2StableDropout(drop_out, name="cls_dropout")
self.classifier = keras.layers.Dense(
units=config.num_labels,
kernel_initializer=get_initializer(config.initializer_range),
name="classifier",
)
self.output_dim = self.pooler.output_dim
@unpack_inputs
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
labels: np.ndarray | tf.Tensor | None = None,
training: bool | None = False,
) -> TFSequenceClassifierOutput | tuple[tf.Tensor]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs = self.deberta(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
pooled_output = self.pooler(sequence_output, training=training)
pooled_output = self.dropout(pooled_output, training=training)
logits = self.classifier(pooled_output)
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "deberta", None) is not None:
with tf.name_scope(self.deberta.name):
self.deberta.build(None)
if getattr(self, "pooler", None) is not None:
with tf.name_scope(self.pooler.name):
self.pooler.build(None)
if getattr(self, "dropout", None) is not None:
with tf.name_scope(self.dropout.name):
self.dropout.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build([None, None, self.output_dim])
@add_start_docstrings(
"""
DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForTokenClassification with Deberta->DebertaV2
class TFDebertaV2ForTokenClassification(TFDebertaV2PreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.deberta = TFDebertaV2MainLayer(config, name="deberta")
self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
self.config = config
@unpack_inputs
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
labels: np.ndarray | tf.Tensor | None = None,
training: bool | None = False,
) -> TFTokenClassifierOutput | tuple[tf.Tensor]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
outputs = self.deberta(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=training)
logits = self.classifier(inputs=sequence_output)
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
if not return_dict:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "deberta", None) is not None:
with tf.name_scope(self.deberta.name):
self.deberta.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build([None, None, self.config.hidden_size])
@add_start_docstrings(
"""
DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
DEBERTA_START_DOCSTRING,
)
# Copied from transformers.models.deberta.modeling_tf_deberta.TFDebertaForQuestionAnswering with Deberta->DebertaV2
class TFDebertaV2ForQuestionAnswering(TFDebertaV2PreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.deberta = TFDebertaV2MainLayer(config, name="deberta")
self.qa_outputs = keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
self.config = config
@unpack_inputs
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
start_positions: np.ndarray | tf.Tensor | None = None,
end_positions: np.ndarray | tf.Tensor | None = None,
training: bool | None = False,
) -> TFQuestionAnsweringModelOutput | tuple[tf.Tensor]:
r"""
start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
"""
outputs = self.deberta(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
logits = self.qa_outputs(inputs=sequence_output)
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
start_logits = tf.squeeze(input=start_logits, axis=-1)
end_logits = tf.squeeze(input=end_logits, axis=-1)
loss = None
if start_positions is not None and end_positions is not None:
labels = {"start_position": start_positions}
labels["end_position"] = end_positions
loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "deberta", None) is not None:
with tf.name_scope(self.deberta.name):
self.deberta.build(None)
if getattr(self, "qa_outputs", None) is not None:
with tf.name_scope(self.qa_outputs.name):
self.qa_outputs.build([None, None, self.config.hidden_size])
@add_start_docstrings(
"""
DeBERTa Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
DEBERTA_START_DOCSTRING,
)
class TFDebertaV2ForMultipleChoice(TFDebertaV2PreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
# _keys_to_ignore_on_load_unexpected = [r"mlm___cls", r"nsp___cls", r"cls.predictions", r"cls.seq_relationship"]
# _keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config: DebertaV2Config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.deberta = TFDebertaV2MainLayer(config, name="deberta")
self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.pooler = TFDebertaV2ContextPooler(config, name="pooler")
self.classifier = keras.layers.Dense(
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
self.output_dim = self.pooler.output_dim
@unpack_inputs
@add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
labels: np.ndarray | tf.Tensor | None = None,
training: bool | None = False,
) -> TFMultipleChoiceModelOutput | tuple[tf.Tensor]:
r"""
labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
"""
if input_ids is not None:
num_choices = shape_list(input_ids)[1]
seq_length = shape_list(input_ids)[2]
else:
num_choices = shape_list(inputs_embeds)[1]
seq_length = shape_list(inputs_embeds)[2]
flat_input_ids = tf.reshape(tensor=input_ids, shape=(-1, seq_length)) if input_ids is not None else None
flat_attention_mask = (
tf.reshape(tensor=attention_mask, shape=(-1, seq_length)) if attention_mask is not None else None
)
flat_token_type_ids = (
tf.reshape(tensor=token_type_ids, shape=(-1, seq_length)) if token_type_ids is not None else None
)
flat_position_ids = (
tf.reshape(tensor=position_ids, shape=(-1, seq_length)) if position_ids is not None else None
)
flat_inputs_embeds = (
tf.reshape(tensor=inputs_embeds, shape=(-1, seq_length, shape_list(inputs_embeds)[3]))
if inputs_embeds is not None
else None
)
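        # The multiple-choice inputs of shape (batch_size, num_choices, seq_length) are flattened above to
        # (batch_size * num_choices, seq_length) so each choice runs through the encoder as an ordinary sequence;
        # the per-choice logits are reshaped back to (batch_size, num_choices) further below.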
outputs = self.deberta(
input_ids=flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids,
position_ids=flat_position_ids,
inputs_embeds=flat_inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
pooled_output = self.pooler(sequence_output, training=training)
pooled_output = self.dropout(pooled_output, training=training)
logits = self.classifier(pooled_output)
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=reshaped_logits)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "deberta", None) is not None:
with tf.name_scope(self.deberta.name):
self.deberta.build(None)
if getattr(self, "pooler", None) is not None:
with tf.name_scope(self.pooler.name):
self.pooler.build(None)
if getattr(self, "classifier", None) is not None:
with tf.name_scope(self.classifier.name):
self.classifier.build([None, None, self.output_dim])
__all__ = [
"TFDebertaV2ForMaskedLM",
"TFDebertaV2ForQuestionAnswering",
"TFDebertaV2ForMultipleChoice",
"TFDebertaV2ForSequenceClassification",
"TFDebertaV2ForTokenClassification",
"TFDebertaV2Model",
"TFDebertaV2PreTrainedModel",
]
| transformers/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py/0 | {
"file_path": "transformers/src/transformers/models/deberta_v2/modeling_tf_deberta_v2.py",
"repo_id": "transformers",
"token_count": 36496
} | 451 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Deformable DETR checkpoints."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeformableDetrConfig, DeformableDetrForObjectDetection, DeformableDetrImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_key(orig_key):
if "backbone.0.body" in orig_key:
orig_key = orig_key.replace("backbone.0.body", "backbone.conv_encoder.model")
if "transformer" in orig_key:
orig_key = orig_key.replace("transformer.", "")
if "norm1" in orig_key:
if "encoder" in orig_key:
orig_key = orig_key.replace("norm1", "self_attn_layer_norm")
else:
orig_key = orig_key.replace("norm1", "encoder_attn_layer_norm")
if "norm2" in orig_key:
if "encoder" in orig_key:
orig_key = orig_key.replace("norm2", "final_layer_norm")
else:
orig_key = orig_key.replace("norm2", "self_attn_layer_norm")
if "norm3" in orig_key:
orig_key = orig_key.replace("norm3", "final_layer_norm")
if "linear1" in orig_key:
orig_key = orig_key.replace("linear1", "fc1")
if "linear2" in orig_key:
orig_key = orig_key.replace("linear2", "fc2")
if "query_embed" in orig_key:
orig_key = orig_key.replace("query_embed", "query_position_embeddings")
if "cross_attn" in orig_key:
orig_key = orig_key.replace("cross_attn", "encoder_attn")
return orig_key
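# A worked illustration of `rename_key` on a hypothetical original key (not taken from a real checkpoint):
#   rename_key("transformer.decoder.layers.0.linear1.weight")
#   returns "decoder.layers.0.fc1.weight"  (the "transformer." prefix is stripped and "linear1" becomes "fc1")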
def read_in_q_k_v(state_dict):
# transformer decoder self-attention layers
for i in range(6):
# read in weights + bias of input projection layer of self-attention
in_proj_weight = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_weight")
in_proj_bias = state_dict.pop(f"decoder.layers.{i}.self_attn.in_proj_bias")
# next, add query, keys and values (in that order) to the state dict
state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
# We will verify our results on an image of cute cats
def prepare_img():
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
im = Image.open(requests.get(url, stream=True).raw)
return im
@torch.no_grad()
def convert_deformable_detr_checkpoint(
checkpoint_path,
single_scale,
dilation,
with_box_refine,
two_stage,
pytorch_dump_folder_path,
push_to_hub,
):
"""
Copy/paste/tweak model's weights to our Deformable DETR structure.
"""
# load default config
config = DeformableDetrConfig()
# set config attributes
if single_scale:
config.num_feature_levels = 1
config.dilation = dilation
config.with_box_refine = with_box_refine
config.two_stage = two_stage
# set labels
config.num_labels = 91
repo_id = "huggingface/label-files"
filename = "coco-detection-id2label.json"
id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
# load image processor
image_processor = DeformableDetrImageProcessor(format="coco_detection")
# prepare image
img = prepare_img()
encoding = image_processor(images=img, return_tensors="pt")
pixel_values = encoding["pixel_values"]
logger.info("Converting model...")
# load original state dict
state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"]
# rename keys
for key in state_dict.copy():
val = state_dict.pop(key)
state_dict[rename_key(key)] = val
# query, key and value matrices need special treatment
read_in_q_k_v(state_dict)
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
prefix = "model."
for key in state_dict.copy():
if not key.startswith("class_embed") and not key.startswith("bbox_embed"):
val = state_dict.pop(key)
state_dict[prefix + key] = val
# finally, create HuggingFace model and load state dict
model = DeformableDetrForObjectDetection(config)
model.load_state_dict(state_dict)
model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# verify our conversion
outputs = model(pixel_values.to(device))
expected_logits = torch.tensor(
[[-9.6645, -4.3449, -5.8705], [-9.7035, -3.8504, -5.0724], [-10.5634, -5.3379, -7.5116]]
)
expected_boxes = torch.tensor([[0.8693, 0.2289, 0.2492], [0.3150, 0.5489, 0.5845], [0.5563, 0.7580, 0.8518]])
if single_scale:
expected_logits = torch.tensor(
[[-9.9051, -4.2541, -6.4852], [-9.6947, -4.0854, -6.8033], [-10.0665, -5.8470, -7.7003]]
)
expected_boxes = torch.tensor([[0.7292, 0.4991, 0.5532], [0.7959, 0.2426, 0.4236], [0.7582, 0.3518, 0.4451]])
if single_scale and dilation:
expected_logits = torch.tensor(
[[-8.9652, -4.1074, -5.6635], [-9.0596, -4.9447, -6.6075], [-10.1178, -4.5275, -6.2671]]
)
expected_boxes = torch.tensor([[0.7665, 0.4130, 0.4769], [0.8364, 0.1841, 0.3391], [0.6261, 0.3895, 0.7978]])
if with_box_refine:
expected_logits = torch.tensor(
[[-8.8895, -5.4187, -6.8153], [-8.4706, -6.1668, -7.6184], [-9.0042, -5.5359, -6.9141]]
)
expected_boxes = torch.tensor([[0.7828, 0.2208, 0.4323], [0.0892, 0.5996, 0.1319], [0.5524, 0.6389, 0.8914]])
if with_box_refine and two_stage:
expected_logits = torch.tensor(
[[-6.7108, -4.3213, -6.3777], [-8.9014, -6.1799, -6.7240], [-6.9315, -4.4735, -6.2298]]
)
expected_boxes = torch.tensor([[0.2583, 0.5499, 0.4683], [0.7652, 0.9068, 0.4882], [0.5490, 0.2763, 0.0564]])
print("Logits:", outputs.logits[0, :3, :3])
assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
print("Everything ok!")
# Save model and image processor
logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
image_processor.save_pretrained(pytorch_dump_folder_path)
# Push to hub
if push_to_hub:
model_name = "deformable-detr"
model_name += "-single-scale" if single_scale else ""
model_name += "-dc5" if dilation else ""
model_name += "-with-box-refine" if with_box_refine else ""
model_name += "-two-stage" if two_stage else ""
print("Pushing model to hub...")
model.push_to_hub(repo_path_or_name=model_name, organization="nielsr", commit_message="Add model")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
type=str,
default="/home/niels/checkpoints/deformable_detr/r50_deformable_detr-checkpoint.pth",
help="Path to Pytorch checkpoint (.pth file) you'd like to convert.",
)
parser.add_argument("--single_scale", action="store_true", help="Whether to set config.num_features_levels = 1.")
parser.add_argument("--dilation", action="store_true", help="Whether to set config.dilation=True.")
parser.add_argument("--with_box_refine", action="store_true", help="Whether to set config.with_box_refine=True.")
parser.add_argument("--two_stage", action="store_true", help="Whether to set config.two_stage=True.")
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_deformable_detr_checkpoint(
args.checkpoint_path,
args.single_scale,
args.dilation,
args.with_box_refine,
args.two_stage,
args.pytorch_dump_folder_path,
args.push_to_hub,
)
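    # Example invocation (all paths below are placeholders, not real files):
    #   python convert_deformable_detr_to_pytorch.py \
    #       --checkpoint_path ./r50_deformable_detr-checkpoint.pth \
    #       --pytorch_dump_folder_path ./deformable-detr-hf \
    #       --with_box_refine --two_stage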
| transformers/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/deformable_detr/convert_deformable_detr_to_pytorch.py",
"repo_id": "transformers",
"token_count": 4052
} | 452 |
# coding=utf-8
# Copyright 2020, The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Bort checkpoint."""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
"""
    Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to our BERT structure.
"""
# Original Bort configuration
bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
encoder = BERTEncoder(
attention_cell=predefined_args["attention_cell"],
num_layers=predefined_args["num_layers"],
units=predefined_args["units"],
hidden_size=predefined_args["hidden_size"],
max_length=predefined_args["max_length"],
num_heads=predefined_args["num_heads"],
scaled=predefined_args["scaled"],
dropout=predefined_args["dropout"],
output_attention=False,
output_all_encodings=False,
use_residual=predefined_args["use_residual"],
activation=predefined_args.get("activation", "gelu"),
layer_norm_eps=predefined_args.get("layer_norm_eps", None),
)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
vocab_name = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
gluon_cache_dir = os.path.join(get_home_dir(), "models")
bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)
original_bort = nlp.model.BERTModel(
encoder,
len(bort_vocab),
units=predefined_args["units"],
embed_size=predefined_args["embed_size"],
embed_dropout=predefined_args["embed_dropout"],
word_embed=predefined_args["word_embed"],
use_pooler=False,
use_token_type_embed=False,
token_type_vocab_size=predefined_args["token_type_vocab_size"],
use_classifier=False,
use_decoder=False,
)
original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
params = original_bort._collect_params_with_prefix()
# Build our config 🤗
hf_bort_config_json = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(bort_vocab),
}
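    # Note the swap relative to the Gluon hyperparameter names above: Gluon's "embed_size" (1024) becomes the
    # Transformers "hidden_size", while Gluon's "hidden_size" (768) becomes the feed-forward "intermediate_size".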
hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param_name = gluon_param
        gluon_param = to_torch(params[gluon_param_name])
        shape_gluon = gluon_param.shape
        assert shape_hf == shape_gluon, (
            f"The gluon parameter {gluon_param_name} has shape {shape_gluon}, but the Transformers parameter expects shape {shape_hf}"
        )
        return gluon_param
hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
)
hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
)
hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
)
hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
)
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
)
for i in range(hf_bort_config.num_hidden_layers):
layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
self_attn: BertSelfAttention = layer.attention.self
self_attn.key.bias.data = check_and_map_params(
self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
)
self_attn.key.weight.data = check_and_map_params(
self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
)
self_attn.query.bias.data = check_and_map_params(
self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
)
self_attn.query.weight.data = check_and_map_params(
self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
)
self_attn.value.bias.data = check_and_map_params(
self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
)
self_attn.value.weight.data = check_and_map_params(
self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
)
# self attention output
self_output: BertSelfOutput = layer.attention.output
self_output.dense.bias = check_and_map_params(
self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
)
self_output.dense.weight = check_and_map_params(
self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
)
self_output.LayerNorm.bias = check_and_map_params(
self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
)
self_output.LayerNorm.weight = check_and_map_params(
self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
)
# intermediate
intermediate: BertIntermediate = layer.intermediate
intermediate.dense.bias = check_and_map_params(
intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
)
intermediate.dense.weight = check_and_map_params(
intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
)
# output
bert_output: BertOutput = layer.output
bert_output.dense.bias = check_and_map_params(
bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
)
bert_output.dense.weight = check_and_map_params(
bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
)
bert_output.LayerNorm.bias = check_and_map_params(
bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
)
bert_output.LayerNorm.weight = check_and_map_params(
bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
)
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
tokenizer = RobertaTokenizer.from_pretrained("FacebookAI/roberta-base")
input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]
# Get gluon output
gluon_input_ids = mx.nd.array([input_ids])
output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path)
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
hf_bort_model.eval()
input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
output_hf = hf_bort_model(**input_ids)[0]
gluon_layer = output_gluon[0].asnumpy()
hf_layer = output_hf[0].detach().numpy()
max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
success = np.allclose(gluon_layer, hf_layer, atol=1e-3)
if success:
print("✔️ Both model do output the same tensors")
else:
print("❌ Both model do **NOT** output the same tensors")
print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| transformers/src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/deprecated/bort/convert_bort_original_gluonnlp_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 6182
} | 453 |
# coding=utf-8
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Ernie-M."""
import os
import unicodedata
from typing import Any, Optional
import sentencepiece as spm
from ....tokenization_utils import PreTrainedTokenizer
from ....utils import logging
from ....utils.import_utils import requires
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
RESOURCE_FILES_NAMES = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
# Adapted from paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer
@requires(backends=("sentencepiece",))
class ErnieMTokenizer(PreTrainedTokenizer):
r"""
    Constructs an Ernie-M tokenizer. It uses the `sentencepiece` tools to cut the words into sub-words.
Args:
sentencepiece_model_file (`str`):
The file path of sentencepiece model.
vocab_file (`str`, *optional*):
The file path of the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `False`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            A special token representing the `unknown (out-of-vocabulary)` token. An unknown token is set to be
            `unk_token` in order to be converted to an ID.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
A special token separating two different sentences in the same input.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
A special token used to make arrays of tokens the same size for batching purposes.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            A special token used for sequence classification. It is the first token of the sequence when built with
special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
A special token representing a masked token. This is the token used in the masked language modeling task
which the model tries to predict the original unmasked ones.
"""
# Ernie-M model doesn't have token_type embedding.
model_input_names: list[str] = ["input_ids"]
vocab_files_names = VOCAB_FILES_NAMES
resource_files_names = RESOURCE_FILES_NAMES
def __init__(
self,
sentencepiece_model_ckpt,
vocab_file=None,
do_lower_case=False,
encoding="utf8",
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
sp_model_kwargs: Optional[dict[str, Any]] = None,
**kwargs,
) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text, so there should be a match in a non-normalized sentence.
self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
self.do_lower_case = do_lower_case
self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(sentencepiece_model_ckpt)
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
self.vocab = self.load_vocab(filepath=vocab_file)
else:
self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
self.reverse_vocab = {v: k for k, v in self.vocab.items()}
super().__init__(
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
vocab_file=vocab_file,
encoding=encoding,
sp_model_kwargs=self.sp_model_kwargs,
**kwargs,
)
def get_offset_mapping(self, text):
if text is None:
return None
split_tokens = self.tokenize(text)
normalized_text, char_mapping = "", []
for i, ch in enumerate(text):
if ch in self.SP_CHAR_MAPPING:
ch = self.SP_CHAR_MAPPING.get(ch)
else:
ch = unicodedata.normalize("NFKC", ch)
if self.is_whitespace(ch):
continue
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
if self.do_lower_case:
text = text.lower()
for token in split_tokens:
if token[:1] == "▁":
token = token[1:]
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
offset = end
return token_mapping
@property
def vocab_size(self):
return len(self.vocab)
def get_vocab(self):
return dict(self.vocab, **self.added_tokens_encoder)
def __getstate__(self):
state = self.__dict__.copy()
state["sp_model"] = None
return state
def __setstate__(self, d):
self.__dict__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
self.sp_model_kwargs = {}
self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.sentencepiece_model_ckpt)
def clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)
def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
"""Tokenize a string."""
if self.sp_model_kwargs.get("enable_sampling") is True:
enable_sampling = True
if self.sp_model_kwargs.get("alpha") is not None:
alpha = self.sp_model_kwargs.get("alpha")
if self.sp_model_kwargs.get("nbest_size") is not None:
nbest_size = self.sp_model_kwargs.get("nbest_size")
if not enable_sampling:
pieces = self.sp_model.EncodeAsPieces(text)
else:
pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
new_pieces = []
for pi, piece in enumerate(pieces):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
new_pieces.append(SPIECE_UNDERLINE)
continue
else:
continue
lst_i = 0
for i, chunk in enumerate(piece):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(chunk) or self.is_punct(chunk):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i])
new_pieces.append(chunk)
lst_i = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i])
lst_i = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i])
lst_i = i
if len(piece) > lst_i:
new_pieces.append(piece[lst_i:])
return new_pieces
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def convert_ids_to_string(self, ids):
"""
        Converts a sequence of ids (integer indices) into a single string.
"""
tokens = self.convert_ids_to_tokens(ids)
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
def _convert_token_to_id(self, token):
return self.vocab.get(token, self.vocab.get(self.unk_token))
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
return self.reverse_vocab.get(index, self.unk_token)
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
r"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. An ErnieM sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] [SEP] B [SEP]`
Args:
token_ids_0 (`list[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`list[int]`: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
_cls = [self.cls_token_id]
_sep = [self.sep_token_id]
return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep
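    # A worked example of the layout above (token ids are made up for illustration): with token_ids_0 = [10, 11]
    # and token_ids_1 = [20], this returns
    # [cls_token_id, 10, 11, sep_token_id, sep_token_id, 20, sep_token_id], i.e. "[CLS] A [SEP] [SEP] B [SEP]".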
def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
r"""
        Build offset map from a pair of offset maps by concatenating and adding offsets of special tokens. An Ernie-M
offset_mapping has the following format:
- single sequence: `(0,0) X (0,0)`
- pair of sequences: `(0,0) A (0,0) (0,0) B (0,0)`
Args:
            offset_mapping_0 (`list[tuple]`):
                List of char offsets to which the special tokens will be added.
            offset_mapping_1 (`list[tuple]`, *optional*):
Optional second list of wordpiece offsets for offset mapping pairs.
Returns:
`list[tuple]`: List of wordpiece offsets with the appropriate offsets of special tokens.
"""
if offset_mapping_1 is None:
return [(0, 0)] + offset_mapping_0 + [(0, 0)]
return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
r"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer `encode` method.
Args:
token_ids_0 (`list[int]`):
List of ids of the first sequence.
token_ids_1 (`list[int]`, *optional*):
Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
Whether or not the token list is already formatted with special tokens for the model.
Returns:
`list[int]`:
The list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
if token_ids_1 is not None:
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: list[int], token_ids_1: Optional[list[int]] = None
) -> list[int]:
"""
Create the token type IDs corresponding to the sequences passed. [What are token type
IDs?](../glossary#token-type-ids) Should be overridden in a subclass if the model has a special way of
        building those.
Args:
token_ids_0 (`list[int]`):
The first tokenized sequence.
token_ids_1 (`list[int]`, *optional*):
The second tokenized sequence.
Returns:
`list[int]`: The token type ids.
"""
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_1 is None:
# [CLS] X [SEP]
return (len(token_ids_0) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)
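    # For example (lengths are illustrative): with len(token_ids_0) == 2 and len(token_ids_1) == 1 this returns
    # [0, 0, 0, 1, 1, 1, 1]: zeros over "[CLS] A" and ones over "[SEP] [SEP] B [SEP]".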
def is_ch_char(self, char):
"""
is_ch_char
"""
if "\u4e00" <= char <= "\u9fff":
return True
return False
def is_alpha(self, char):
"""
is_alpha
"""
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def is_punct(self, char):
"""
is_punct
"""
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def is_whitespace(self, char):
"""
is whitespace
"""
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(char) == 1:
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def load_vocab(self, filepath):
token_to_idx = {}
with open(filepath, "r", encoding="utf-8") as f:
for index, line in enumerate(f):
token = line.rstrip("\n")
token_to_idx[token] = int(index)
return token_to_idx
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple[str]:
index = 0
if os.path.isdir(save_directory):
vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
else:
vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(vocab_file, "w", encoding="utf-8") as writer:
for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!"
)
index = token_index
writer.write(token + "\n")
index += 1
tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
with open(tokenizer_model_file, "wb") as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model)
return (vocab_file,)
__all__ = ["ErnieMTokenizer"]
| transformers/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py/0 | {
"file_path": "transformers/src/transformers/models/deprecated/ernie_m/tokenization_ernie_m.py",
"repo_id": "transformers",
"token_count": 7523
} | 454 |
from .... import PretrainedConfig
class NezhaConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`NezhaModel`]. It is used to instantiate a Nezha
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Nezha
[sijunhe/nezha-cn-base](https://huggingface.co/sijunhe/nezha-cn-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, optional, defaults to 21128):
            Vocabulary size of the NEZHA model. Defines the number of different tokens that can be represented by the
*inputs_ids* passed to the forward method of [`NezhaModel`].
hidden_size (`int`, optional, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, optional, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, optional, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, optional, defaults to 3072):
The dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the encoder and pooler.
hidden_dropout_prob (`float`, optional, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, optional, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, optional, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
(e.g., 512 or 1024 or 2048).
type_vocab_size (`int`, optional, defaults to 2):
The vocabulary size of the *token_type_ids* passed into [`NezhaModel`].
initializer_range (`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
classifier_dropout (`float`, optional, defaults to 0.1):
The dropout ratio for attached classifiers.
is_decoder (`bool`, *optional*, defaults to `False`):
Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
Example:
```python
>>> from transformers import NezhaConfig, NezhaModel
    >>> # Initializing a Nezha configuration
    >>> configuration = NezhaConfig()
    >>> # Initializing a model (with random weights) from the Nezha-base style configuration
>>> model = NezhaModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "nezha"
def __init__(
self,
vocab_size=21128,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
max_relative_position=64,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
classifier_dropout=0.1,
pad_token_id=0,
bos_token_id=2,
eos_token_id=3,
use_cache=True,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.max_relative_position = max_relative_position
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.classifier_dropout = classifier_dropout
self.use_cache = use_cache
__all__ = ["NezhaConfig"]
| transformers/src/transformers/models/deprecated/nezha/configuration_nezha.py/0 | {
"file_path": "transformers/src/transformers/models/deprecated/nezha/configuration_nezha.py",
"repo_id": "transformers",
"token_count": 1876
} | 455 |
# coding=utf-8
# Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
RetriBERT model
"""
import math
from typing import Optional
import torch
import torch.utils.checkpoint as checkpoint
from torch import nn
from ....modeling_utils import PreTrainedModel
from ....utils import add_start_docstrings, logging
from ...bert.modeling_bert import BertModel
from .configuration_retribert import RetriBertConfig
logger = logging.get_logger(__name__)
# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class RetriBertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config: RetriBertConfig
load_tf_weights = None
base_model_prefix = "retribert"
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
RETRIBERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`RetriBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
@add_start_docstrings(
"""Bert Based model to embed queries or document for document retrieval.""",
RETRIBERT_START_DOCSTRING,
)
class RetriBertModel(RetriBertPreTrainedModel):
def __init__(self, config: RetriBertConfig) -> None:
super().__init__(config)
self.projection_dim = config.projection_dim
self.bert_query = BertModel(config)
self.bert_doc = None if config.share_encoders else BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.project_query = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
self.project_doc = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
self.ce_loss = nn.CrossEntropyLoss(reduction="mean")
# Initialize weights and apply final processing
self.post_init()
def embed_sentences_checkpointed(
self,
input_ids,
attention_mask,
sent_encoder,
checkpoint_batch_size=-1,
):
# reproduces BERT forward pass with checkpointing
if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size:
return sent_encoder(input_ids, attention_mask=attention_mask)[1]
else:
# prepare implicit variables
device = input_ids.device
input_shape = input_ids.size()
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
head_mask = [None] * sent_encoder.config.num_hidden_layers
extended_attention_mask: torch.Tensor = sent_encoder.get_extended_attention_mask(
attention_mask, input_shape
)
# define function for checkpointing
def partial_encode(*inputs):
encoder_outputs = sent_encoder.encoder(
inputs[0],
attention_mask=inputs[1],
head_mask=head_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = sent_encoder.pooler(sequence_output)
return pooled_output
# run embedding layer on everything at once
embedding_output = sent_encoder.embeddings(
input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None
)
# run encoding and pooling on one mini-batch at a time
pooled_output_list = []
for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)):
b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size]
pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask)
pooled_output_list.append(pooled_output)
return torch.cat(pooled_output_list, dim=0)
def embed_questions(
self,
input_ids,
attention_mask=None,
checkpoint_batch_size=-1,
):
q_reps = self.embed_sentences_checkpointed(
input_ids,
attention_mask,
self.bert_query,
checkpoint_batch_size,
)
return self.project_query(q_reps)
def embed_answers(
self,
input_ids,
attention_mask=None,
checkpoint_batch_size=-1,
):
a_reps = self.embed_sentences_checkpointed(
input_ids,
attention_mask,
self.bert_query if self.bert_doc is None else self.bert_doc,
checkpoint_batch_size,
)
return self.project_doc(a_reps)
def forward(
self,
input_ids_query: torch.LongTensor,
attention_mask_query: Optional[torch.FloatTensor],
input_ids_doc: torch.LongTensor,
attention_mask_doc: Optional[torch.FloatTensor],
checkpoint_batch_size: int = -1,
) -> torch.FloatTensor:
r"""
Args:
input_ids_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary for the queries in a batch.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask_query (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
input_ids_doc (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary for the documents in a batch.
attention_mask_doc (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on documents padding token indices.
checkpoint_batch_size (`int`, *optional*, defaults to `-1`):
If greater than 0, uses gradient checkpointing to only compute sequence representation on
`checkpoint_batch_size` examples at a time on the GPU. All query representations are still compared to
all document representations in the batch.
Return:
            `torch.FloatTensor`: The bidirectional cross-entropy loss obtained while trying to match each query to its
            corresponding document and each document to its corresponding query in the batch.
"""
device = input_ids_query.device
q_reps = self.embed_questions(input_ids_query, attention_mask_query, checkpoint_batch_size)
a_reps = self.embed_answers(input_ids_doc, attention_mask_doc, checkpoint_batch_size)
compare_scores = torch.mm(q_reps, a_reps.t())
loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device))
loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device))
loss = (loss_qa + loss_aq) / 2
return loss
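# A minimal usage sketch (tensor names and shapes below are illustrative only, not part of the original file):
#   model = RetriBertModel(RetriBertConfig())
#   loss = model(query_ids, query_mask, doc_ids, doc_mask, checkpoint_batch_size=8)
#   loss.backward()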
__all__ = ["RetriBertModel", "RetriBertPreTrainedModel"]
| transformers/src/transformers/models/deprecated/retribert/modeling_retribert.py/0 | {
"file_path": "transformers/src/transformers/models/deprecated/retribert/modeling_retribert.py",
"repo_id": "transformers",
"token_count": 3821
} | 456 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Transformer XL checkpoint and datasets."""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.deprecated.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.deprecated.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load Python 2 dataset pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f"Save vocabulary to {pytorch_vocab_dump_path}")
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop("vocab", None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f"Save dataset to {pytorch_dataset_dump_path}")
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
print(f"Building PyTorch model from configuration: {config}")
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
torch.save(model.state_dict(), pytorch_weights_dump_path)
print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.\n"
"Given the files are in the pickle format, please be wary of passing it files you trust.",
)
args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
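    # Example invocation for converting only a pre-processed corpus (paths are placeholders):
    #   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
    #       --pytorch_dump_folder_path ./transfo-xl-pt \
    #       --transfo_xl_dataset_file ./data/cache.pkl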
| transformers/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/deprecated/transfo_xl/convert_transfo_xl_original_tf_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 2023
} | 457 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
from typing import Optional
import regex as re
import torch
from huggingface_hub import hf_hub_download
from transformers import (
DepthProConfig,
DepthProForDepthEstimation,
DepthProImageProcessorFast,
)
# fmt: off
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
# encoder
r"encoder.(patch|image)_encoder.cls_token": r"depth_pro.encoder.\1_encoder.model.embeddings.cls_token",
r"encoder.(patch|image)_encoder.pos_embed": r"depth_pro.encoder.\1_encoder.model.embeddings.position_embeddings",
r"encoder.(patch|image)_encoder.patch_embed.proj.(weight|bias)": r"depth_pro.encoder.\1_encoder.model.embeddings.patch_embeddings.projection.\2",
r"encoder.(patch|image)_encoder.blocks.(\d+).norm(\d+).(weight|bias)": r"depth_pro.encoder.\1_encoder.model.encoder.layer.\2.norm\3.\4",
r"encoder.(patch|image)_encoder.blocks.(\d+).attn.qkv.(weight|bias)": r"depth_pro.encoder.\1_encoder.model.encoder.layer.\2.attention.attention.(query|key|value).\3",
r"encoder.(patch|image)_encoder.blocks.(\d+).attn.proj.(weight|bias)": r"depth_pro.encoder.\1_encoder.model.encoder.layer.\2.attention.output.dense.\3",
r"encoder.(patch|image)_encoder.blocks.(\d+).ls(\d+).gamma": r"depth_pro.encoder.\1_encoder.model.encoder.layer.\2.layer_scale\3.lambda1",
r"encoder.(patch|image)_encoder.blocks.(\d+).mlp.fc(\d+).(weight|bias)": r"depth_pro.encoder.\1_encoder.model.encoder.layer.\2.mlp.fc\3.\4",
r"encoder.(patch|image)_encoder.norm.(weight|bias)": r"depth_pro.encoder.\1_encoder.model.layernorm.\2",
r"encoder.fuse_lowres.(weight|bias)": r"depth_pro.neck.fuse_image_with_low_res.\1",
# fov
r"fov.encoder.0.cls_token": r"fov_model.fov_encoder.model.embeddings.cls_token",
r"fov.encoder.0.pos_embed": r"fov_model.fov_encoder.model.embeddings.position_embeddings",
r"fov.encoder.0.patch_embed.proj.(weight|bias)": r"fov_model.fov_encoder.model.embeddings.patch_embeddings.projection.\1",
r"fov.encoder.0.blocks.(\d+).norm(\d+).(weight|bias)": r"fov_model.fov_encoder.model.encoder.layer.\1.norm\2.\3",
r"fov.encoder.0.blocks.(\d+).attn.qkv.(weight|bias)": r"fov_model.fov_encoder.model.encoder.layer.\1.attention.attention.(query|key|value).\2",
r"fov.encoder.0.blocks.(\d+).attn.proj.(weight|bias)": r"fov_model.fov_encoder.model.encoder.layer.\1.attention.output.dense.\2",
r"fov.encoder.0.blocks.(\d+).ls(\d+).gamma": r"fov_model.fov_encoder.model.encoder.layer.\1.layer_scale\2.lambda1",
r"fov.encoder.0.blocks.(\d+).mlp.fc(\d+).(weight|bias)": r"fov_model.fov_encoder.model.encoder.layer.\1.mlp.fc\2.\3",
r"fov.encoder.0.norm.(weight|bias)": r"fov_model.fov_encoder.model.layernorm.\1",
r"fov.downsample.0.(weight|bias)": r"fov_model.conv.\1",
r"fov.encoder.1.(weight|bias)": r"fov_model.fov_encoder.neck.\1",
r"fov.head.(\d+).(weight|bias)": r"fov_model.head.layers.\1.\2",
# head
r"head.(\d+).(weight|bias)": r"head.layers.\1.\2",
# upsamples
r"encoder.upsample_lowres.(weight|bias)": r"depth_pro.neck.feature_upsample.image_block.layers.0.\1",
r"encoder.upsample_latent(\d+).(\d+).(weight|bias)": lambda match: (
f"depth_pro.neck.feature_upsample.intermediate.{1-int(match.group(1))}.layers.{match.group(2)}.{match.group(3)}"
),
r"encoder.upsample(\d+).(\d+).(weight|bias)": lambda match: (
f"depth_pro.neck.feature_upsample.scaled_images.{2-int(match.group(1))}.layers.{match.group(2)}.{match.group(3)}"
),
# projections between encoder and fusion
r"decoder.convs.(\d+).weight": lambda match: (
f"depth_pro.neck.feature_projection.projections.{4-int(match.group(1))}.weight"
),
# fusion stage
r"decoder.fusions.([1234]).resnet(\d+).residual.(\d+).(weight|bias)": lambda match: (
f"fusion_stage.intermediate.{4-int(match.group(1))}.residual_layer{match.group(2)}.convolution{(int(match.group(3))+1)//2}.{match.group(4)}"
),
r"decoder.fusions.0.resnet(\d+).residual.(\d+).(weight|bias)": lambda match: (
f"fusion_stage.final.residual_layer{match.group(1)}.convolution{(int(match.group(2))+1)//2}.{match.group(3)}"
),
r"decoder.fusions.([1234]).out_conv.(weight|bias)": lambda match: (
f"fusion_stage.intermediate.{4-int(match.group(1))}.projection.{match.group(2)}"
),
r"decoder.fusions.0.out_conv.(weight|bias)": lambda match: (
f"fusion_stage.final.projection.{match.group(1)}"
),
r"decoder.fusions.(\d+).deconv.(weight|bias)": lambda match: (
f"fusion_stage.intermediate.{4-int(match.group(1))}.deconv.{match.group(2)}"
),
}
# fmt: on
def convert_old_keys_to_new_keys(state_dict_keys: Optional[dict] = None):
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
for pattern, replacement in ORIGINAL_TO_CONVERTED_KEY_MAPPING.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
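# Illustrative sketch (editor's note, not part of the original script): how the regex mapping above
# rewrites a couple of checkpoint keys. The input keys are hypothetical but follow the original naming.
#
#     >>> convert_old_keys_to_new_keys(["head.0.weight", "encoder.fuse_lowres.bias"])
#     {'head.0.weight': 'head.layers.0.weight',
#      'encoder.fuse_lowres.bias': 'depth_pro.neck.fuse_image_with_low_res.bias'}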
def get_qkv_state_dict(key, parameter):
"""
new key which looks like this
xxxx.(q|k|v).xxx (m, n)
is converted to
xxxx.q.xxxx (m//3, n)
xxxx.k.xxxx (m//3, n)
xxxx.v.xxxx (m//3, n)
"""
qkv_state_dict = {}
placeholder = re.search(r"(\(.*?\))", key).group(1) # finds "(query|key|value)"
replacements_keys = placeholder[1:-1].split("|") # creates ['query', 'key', 'value']
replacements_vals = torch.split(
parameter, split_size_or_sections=parameter.size(0) // len(replacements_keys), dim=0
)
for replacement_key, replacement_val in zip(replacements_keys, replacements_vals):
qkv_state_dict[key.replace(placeholder, replacement_key)] = replacement_val
return qkv_state_dict
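# Illustrative sketch (editor's note, not part of the original script): splitting a fused qkv weight.
# The key below is hypothetical; real keys are produced by convert_old_keys_to_new_keys above.
#
#     >>> import torch
#     >>> fused = torch.randn(9, 4)  # rows stack the query, key and value weights
#     >>> split = get_qkv_state_dict("layer.0.attention.attention.(query|key|value).weight", fused)
#     >>> sorted(split)
#     ['layer.0.attention.attention.key.weight',
#      'layer.0.attention.attention.query.weight',
#      'layer.0.attention.attention.value.weight']
#     >>> split["layer.0.attention.attention.query.weight"].shape
#     torch.Size([3, 4])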
def write_model(
hf_repo_id: str,
output_dir: str,
safe_serialization: bool = True,
):
os.makedirs(output_dir, exist_ok=True)
# ------------------------------------------------------------
# Create and save config
# ------------------------------------------------------------
# create config
backbone_config = {
"model_type": "dinov2",
"num_hidden_layers": 24,
"patch_size": 16,
"hidden_size": 1024,
"num_attention_heads": 16,
"image_size": 384,
"use_mask_token": False,
}
config = DepthProConfig(
# original implementation uses same config for all 3 models
image_model_config=backbone_config,
patch_model_config=backbone_config,
fov_model_config=backbone_config,
use_fov_model=True,
)
# save config
config.save_pretrained(output_dir)
print("Model config saved successfully...")
# ------------------------------------------------------------
# Convert weights
# ------------------------------------------------------------
# download and load state_dict from hf repo
file_path = hf_hub_download(hf_repo_id, "depth_pro.pt")
loaded = torch.load(file_path, weights_only=True)
print("Converting model...")
all_keys = list(loaded.keys())
new_keys = convert_old_keys_to_new_keys(all_keys)
state_dict = {}
for key in all_keys:
new_key = new_keys[key]
current_parameter = loaded.pop(key)
if "qkv" in key:
qkv_state_dict = get_qkv_state_dict(new_key, current_parameter)
state_dict.update(qkv_state_dict)
else:
state_dict[new_key] = current_parameter
print("Loading the checkpoint in a DepthPro model.")
model = DepthProForDepthEstimation(config)
model.load_state_dict(state_dict, strict=True, assign=True)
print("Checkpoint loaded successfully.")
print("Saving the model.")
model.save_pretrained(output_dir, safe_serialization=safe_serialization)
del state_dict, model
# Safety check: reload the converted model
gc.collect()
print("Reloading the model to check if it's saved correctly.")
model = DepthProForDepthEstimation.from_pretrained(output_dir, device_map="auto")
print("Model reloaded successfully.")
return model
def write_image_processor(output_dir: str):
image_processor = DepthProImageProcessorFast()
image_processor.save_pretrained(output_dir)
return image_processor
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--hf_repo_id",
default="apple/DepthPro",
help="Location of official weights from apple on HF",
)
parser.add_argument(
"--output_dir",
default="apple_DepthPro",
help="Location to write the converted model and processor",
)
parser.add_argument(
"--safe_serialization", default=True, type=bool, help="Whether or not to save using `safetensors`."
)
parser.add_argument(
"--push_to_hub",
action=argparse.BooleanOptionalAction,
help="Whether or not to push the converted model to the huggingface hub.",
)
parser.add_argument(
"--hub_repo_id",
default="apple/DepthPro-hf",
help="Huggingface hub repo to write the converted model and processor",
)
args = parser.parse_args()
model = write_model(
hf_repo_id=args.hf_repo_id,
output_dir=args.output_dir,
safe_serialization=args.safe_serialization,
)
image_processor = write_image_processor(
output_dir=args.output_dir,
)
if args.push_to_hub:
print("Pushing to hub...")
model.push_to_hub(args.hub_repo_id)
image_processor.push_to_hub(args.hub_repo_id)
if __name__ == "__main__":
main()
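# Example invocation (illustrative; the defaults already point at the official Apple repos):
#   python convert_depth_pro_weights_to_hf.py --output_dir ./DepthPro-hf
#   python convert_depth_pro_weights_to_hf.py --output_dir ./DepthPro-hf --push_to_hub --hub_repo_id <your-namespace>/DepthPro-hf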
| transformers/src/transformers/models/depth_pro/convert_depth_pro_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/depth_pro/convert_depth_pro_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 5058
} | 458 |
# coding=utf-8
# Copyright 2025 The Nari Labs and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Callable, Optional, Union
import torch
import torch.distributed as dist
from ...generation.logits_process import (
DiaClassifierFreeGuidanceLogitsProcessor,
DiaEOSChannelFilterLogitsProcessor,
DiaEOSDelayPatternLogitsProcessor,
LogitsProcessorList,
TemperatureLogitsWarper,
)
from ...generation.stopping_criteria import StoppingCriteriaList
from ...generation.streamers import BaseStreamer
from ...generation.utils import GenerateOutput, GenerationConfig, GenerationMixin, GenerationMode
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_utils import PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class DiaGenerationMixin(GenerationMixin):
# Indicates whether CFG is used; CFG requires the inputs to be duplicated (repeated) so that the unconditioned branch is handled properly
_uses_cfg = None
def _get_logits_processor(
self,
generation_config: GenerationConfig,
input_ids_seq_length: Optional[int] = None,
encoder_input_ids: torch.LongTensor = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
logits_processor: Optional[LogitsProcessorList] = None,
device: Optional[str] = None,
model_kwargs: Optional[dict[str, Any]] = None,
negative_prompt_ids: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
) -> LogitsProcessorList:
# Need either custom order or custom processor instead
# (Temporarily disabling those for the super function)
original_guidance_scale = generation_config.guidance_scale
original_temperature = generation_config.temperature
generation_config.guidance_scale = None
generation_config.temperature = None
# Get base processors and those we can integrate easily
custom_processors = LogitsProcessorList()
if original_temperature is not None and original_temperature != 1.0:
custom_processors.append(TemperatureLogitsWarper(original_temperature))
custom_processors.append(
DiaEOSChannelFilterLogitsProcessor(
num_channels=len(self.config.delay_pattern),
eos_token_id=self.config.eos_token_id,
)
)
merged_processors = super()._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_seq_length,
encoder_input_ids=encoder_input_ids,
prefix_allowed_tokens_fn=None,
logits_processor=custom_processors,
device=device,
model_kwargs=model_kwargs,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
)
# Custom processors we need at specific positions
if original_guidance_scale is not None and original_guidance_scale != 1:
cfg_processor = DiaClassifierFreeGuidanceLogitsProcessor(
guidance_scale=original_guidance_scale,
guidance_top_k=generation_config.top_k,
)
merged_processors.insert(0, cfg_processor)
merged_processors.append(
DiaEOSDelayPatternLogitsProcessor(
delay_pattern=self.config.delay_pattern,
eos_token_id=self.config.eos_token_id,
max_generation_len=generation_config.max_length,
device=device,
)
)
# Restore the temporarily disabled values
generation_config.guidance_scale = original_guidance_scale
generation_config.temperature = original_temperature
return merged_processors
def _prepare_generation_config(
self, generation_config: Optional[GenerationConfig], use_model_defaults: Optional[bool] = None, **kwargs: dict
) -> tuple[GenerationConfig, dict]:
generation_config, model_kwargs = super()._prepare_generation_config(
generation_config, use_model_defaults, **kwargs
)
# We allow generation up to max length + max delay pattern
# (will revert back to max length after generation)
generation_config.max_length += max(self.config.delay_pattern)
# Internal flag to indicate CFG that needs to prepare unconditioned input
self._uses_cfg = generation_config.guidance_scale is not None and generation_config.guidance_scale != 1
return generation_config, model_kwargs
def _prepare_model_inputs(
self,
inputs: Optional[torch.Tensor] = None,
bos_token_id: Optional[torch.Tensor] = None,
model_kwargs: Optional[dict[str, torch.Tensor]] = None,
) -> tuple[torch.Tensor, Optional[str], dict[str, torch.Tensor]]:
inputs, input_name, model_kwargs = super()._prepare_model_inputs(
inputs=inputs,
bos_token_id=bos_token_id,
model_kwargs=model_kwargs,
)
# If CFG is requested we fill in the unconditioned parts
if self._uses_cfg:
unconditioned_inputs = torch.zeros_like(inputs)
inputs = torch.cat([inputs, unconditioned_inputs], dim=0)
if model_kwargs.get("attention_mask", None) is not None:
model_kwargs["attention_mask"] = model_kwargs["attention_mask"].repeat(2, 1)
return inputs, input_name, model_kwargs
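# Illustrative sketch (editor's note): with CFG enabled the effective batch is doubled along dim 0,
# the second half being an all-zero, unconditioned copy of the inputs; the attention mask is repeated accordingly.
#
#     >>> import torch
#     >>> cond = torch.tensor([[5, 6, 7]])                 # hypothetical conditioned input ids
#     >>> torch.cat([cond, torch.zeros_like(cond)], dim=0)
#     tensor([[5, 6, 7],
#             [0, 0, 0]])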
def _prepare_decoder_input_ids_for_generation(
self,
batch_size: int,
model_input_name: str,
model_kwargs: dict[str, torch.Tensor],
decoder_start_token_id: torch.Tensor,
device: Optional[torch.device] = None,
) -> tuple[torch.LongTensor, dict[str, torch.Tensor]]:
"""Prepares `decoder_input_ids` for generation with encoder-decoder models"""
# 1. Check whether the user has defined `decoder_input_ids` and `decoder_attention_mask`; if not error out
decoder_input_ids = decoder_attention_mask = None
if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
decoder_input_ids = model_kwargs.pop("decoder_input_ids")
if model_kwargs is not None and "decoder_attention_mask" in model_kwargs:
decoder_attention_mask = model_kwargs.pop("decoder_attention_mask")
# We allow generating without preparation (no proper delay) but discourage it
if decoder_input_ids is None or decoder_attention_mask is None:
logger.warning_once(
"In order to generate with Dia, we need the processed audio input: Got `decoder_input_ids`:"
f" {decoder_input_ids is not None} and got `decoder_attention_mask`={decoder_attention_mask is not None}."
f" This can be achieved via the [`DiaProcessor`] but now defaulting to non-delayed generation."
)
num_channels = self.config.decoder_config.num_channels
real_batch_size = batch_size // 2 if self._uses_cfg else batch_size
if decoder_input_ids is None:
decoder_input_ids = torch.full(
(real_batch_size, 1, num_channels), decoder_start_token_id, dtype=torch.long, device=device
)
decoder_attention_mask = torch.ones(
size=(real_batch_size, decoder_input_ids.shape[1]), dtype=torch.long, device=device
)
# 2. Determine the valid input and what works as mask within the input
delay_mask = decoder_input_ids.long()
valid_input_size = (
decoder_input_ids.shape[1] - (decoder_input_ids[:, :, 0] == self.config.pad_token_id).sum(dim=-1).max()
)
decoder_input_ids = delay_mask[:, :valid_input_size].transpose(1, 2).long()
decoder_attention_mask = decoder_attention_mask[:, :valid_input_size].long()
# 3. Overwrite into model kwargs
model_kwargs["decoder_attention_mask"] = decoder_attention_mask
model_kwargs["decoder_delay_mask"] = delay_mask
return decoder_input_ids, model_kwargs
def prepare_inputs_for_generation(
self,
input_ids,
encoder_outputs=None, # Using this to easily get the batch size
decoder_delay_mask=None,
**kwargs,
):
# Reshape decoder input_ids to 3D to be compile friendly and to fit the expected model input shape
batch_size = encoder_outputs[0].shape[0] // 2 if self._uses_cfg else encoder_outputs[0].shape[0]
input_ids = input_ids.reshape(batch_size, self.config.decoder_config.num_channels, -1).transpose(1, 2)
# Base method handles most things except CFG and the delay pattern mask
model_inputs = super().prepare_inputs_for_generation(input_ids, encoder_outputs=encoder_outputs, **kwargs)
# Post processing for CFG and overwriting via delay pattern mask
# 1. Delay pattern mask -- force tokens if not allowed to predict (!= pad_token in mask)
model_inputs["decoder_input_ids"] = self.apply_delay_mask(
input_ids, self.config.pad_token_id, decoder_delay_mask
)
# Depending on cache usage we need to pass all or just one
if model_inputs.get("use_cache", False) and model_inputs["cache_position"][0] > 0:
model_inputs["decoder_input_ids"] = model_inputs["decoder_input_ids"][:, -1, :][:, None, :]
# Be compile friendly
model_inputs["decoder_input_ids"] = model_inputs["decoder_input_ids"].contiguous()
# 2. Apply CFG duplication if needed
if self._uses_cfg:
for key in ["decoder_input_ids", "decoder_attention_mask", "decoder_position_ids"]:
if model_inputs.get(key, None) is not None:
# double first dimension and keep everything else the same
repeat_pattern = tuple([2] + [1] * (model_inputs[key].ndim - 1))
model_inputs[key] = model_inputs[key].repeat(*repeat_pattern)
return model_inputs
@staticmethod
def apply_delay_mask(input_ids: torch.Tensor, pad_id: int, delay_mask: Optional[torch.Tensor]) -> torch.Tensor:
if delay_mask is None:
return input_ids
mask_len = min(input_ids.shape[1], delay_mask.shape[1])
valid_mask = delay_mask[:, :mask_len, :]
valid_input = input_ids[:, :mask_len, :]
# Overwrite the respective parts of the input
input_ids[:, :mask_len, :] = torch.where(valid_mask == pad_id, valid_input, valid_mask)
return input_ids
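# Illustrative sketch (editor's note): positions where the delay mask equals the pad token keep the
# generated tokens; every other position is forced to the mask value.
#
#     >>> import torch
#     >>> generated = torch.tensor([[[10, 11], [12, 13]]])   # (batch, seq_len, channels)
#     >>> delay_mask = torch.tensor([[[-1, 7], [-1, -1]]])   # -1 plays the role of pad_token_id here
#     >>> DiaGenerationMixin.apply_delay_mask(generated, pad_id=-1, delay_mask=delay_mask)
#     tensor([[[10,  7],
#              [12, 13]]])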
def _main_generate_loop(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
synced_gpus: Optional[bool] = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
negative_prompt_ids: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
use_model_defaults: Optional[bool] = None,
custom_generate: Optional[str] = None,
**kwargs,
):
# ********** mostly taken from main generate function up to calling the different methods (see NOTE) **********
# 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
tokenizer = kwargs.pop("tokenizer", None) # Pull this out first, we only use it for stopping criteria
assistant_tokenizer = kwargs.pop("assistant_tokenizer", None) # only used for assisted generation
generation_config, model_kwargs = self._prepare_generation_config(
generation_config, use_model_defaults, **kwargs
)
self._validate_model_kwargs(model_kwargs.copy())
self._validate_assistant(assistant_model, tokenizer, assistant_tokenizer)
# 2. Set generation parameters if not already defined
if synced_gpus is None:
synced_gpus = (is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)) and dist.get_world_size() > 1
logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
# 3. Define model inputs
kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(
inputs, generation_config.bos_token_id, model_kwargs
)
batch_size = inputs_tensor.shape[0]
device = inputs_tensor.device
self._prepare_special_tokens(generation_config, kwargs_has_attention_mask, device=device)
# 4. Define other model kwargs
if "encoder_outputs" not in model_kwargs:
# if model is encoder decoder encoder_outputs are created and added to `model_kwargs`
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(
inputs_tensor, model_kwargs, model_input_name, generation_config
)
# 5. Prepare `input_ids` which will be used for auto-regressive generation
input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(
batch_size=batch_size,
model_input_name=model_input_name,
model_kwargs=model_kwargs,
decoder_start_token_id=generation_config._decoder_start_token_tensor,
device=inputs_tensor.device,
)
if generation_config.token_healing:
input_ids = self.heal_tokens(input_ids, tokenizer)
if streamer is not None:
streamer.put(input_ids.cpu())
# 6. Prepare `max_length` depending on other stopping criteria.
# NOTE: incorrect `input_ids.shape[1]` previously
input_ids_length = input_ids.shape[-1]
has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None
generation_config = self._prepare_generated_length(
generation_config=generation_config,
has_default_max_length=has_default_max_length,
has_default_min_length=has_default_min_length,
model_input_name=model_input_name,
inputs_tensor=inputs_tensor,
input_ids_length=input_ids_length,
)
# If the model supports `logits_to_keep` in forward(), set it to 1 to avoid computing the whole
# logit matrix. This can save a lot of memory during the first forward pass. Note that assisted decoding
# dynamically overrides this value as it can need more than the last token logits
if self._supports_logits_to_keep() and "logits_to_keep" not in model_kwargs:
model_kwargs["logits_to_keep"] = 1
self._validate_generated_length(generation_config, input_ids_length, has_default_max_length)
# 7. Prepare the cache.
# - `model_kwargs` may be updated in place with a cache as defined by the parameters in `generation_config`.
# - different models have a different cache name expected by the model (default = "past_key_values")
# - `max_length`, prepared above, is used to determine the maximum cache length
max_cache_length = generation_config.max_length - 1
if (
inputs_tensor.shape[1] != input_ids_length
and model_input_name == "inputs_embeds"
and not self.config.is_encoder_decoder
):
max_cache_length += inputs_tensor.shape[1]
self._prepare_cache_for_generation(
generation_config, model_kwargs, assistant_model, batch_size, max_cache_length
)
# 8. determine generation mode
generation_mode = generation_config.get_generation_mode(assistant_model)
if streamer is not None and (generation_config.num_beams > 1):
raise ValueError(
"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1."
)
# 9. prepare logits processors and stopping criteria
prepared_logits_processor = self._get_logits_processor(
generation_config=generation_config,
input_ids_seq_length=input_ids_length,
encoder_input_ids=inputs_tensor,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
logits_processor=logits_processor,
device=inputs_tensor.device,
model_kwargs=model_kwargs,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
)
prepared_stopping_criteria = self._get_stopping_criteria(
generation_config=generation_config, stopping_criteria=stopping_criteria, tokenizer=tokenizer, **kwargs
)
# Set model_kwargs `use_cache` so we can use it later in forward runs
model_kwargs["use_cache"] = generation_config.use_cache
# ******************* taken from main generate function up to calling the different methods *******************
# Prepare inner 2D logic in generation loop
input_ids = input_ids.reshape(-1, input_ids.shape[-1])
# 10. go into different generation modes
if generation_mode in (GenerationMode.SAMPLE, GenerationMode.GREEDY_SEARCH):
# 11. expand input_ids with `num_return_sequences` additional sequences per batch
if generation_config.num_return_sequences > 1:
raise ValueError("`num_return_sequences>1` is incompatible with Dia.")
# 12. run sample (it degenerates to greedy search when `generation_config.do_sample=False`)
return self._sample(
input_ids,
logits_processor=prepared_logits_processor,
stopping_criteria=prepared_stopping_criteria,
generation_config=generation_config,
synced_gpus=synced_gpus,
streamer=streamer,
**model_kwargs,
)
else:
raise ValueError(
"Got incompatible mode for generation, should be one of greedy or sampling. "
"Ensure that beam search is de-activated by setting `num_beams=1` and `num_beam_groups=1`."
)
@torch.no_grad()
def generate(
self,
inputs: Optional[torch.Tensor] = None,
generation_config: Optional[GenerationConfig] = None,
logits_processor: Optional[LogitsProcessorList] = None,
stopping_criteria: Optional[StoppingCriteriaList] = None,
prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], list[int]]] = None,
synced_gpus: Optional[bool] = None,
assistant_model: Optional["PreTrainedModel"] = None,
streamer: Optional["BaseStreamer"] = None,
negative_prompt_ids: Optional[torch.Tensor] = None,
negative_prompt_attention_mask: Optional[torch.Tensor] = None,
use_model_defaults: Optional[bool] = None,
custom_generate: Optional[str] = None,
**kwargs,
) -> Union[GenerateOutput, torch.LongTensor]:
# We expect the initial input ids to be the complete mask (delayed input)
delay_mask = kwargs.get("decoder_input_ids")
if delay_mask is not None:
delay_mask = delay_mask.clone()
output = self._main_generate_loop(
inputs=inputs,
generation_config=generation_config,
logits_processor=logits_processor,
stopping_criteria=stopping_criteria,
prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
synced_gpus=synced_gpus,
assistant_model=assistant_model,
streamer=streamer,
negative_prompt_ids=negative_prompt_ids,
negative_prompt_attention_mask=negative_prompt_attention_mask,
use_model_defaults=use_model_defaults,
custom_generate=custom_generate,
**kwargs,
)
return_dict_in_generate = not isinstance(output, torch.Tensor)
if return_dict_in_generate:
output_sequences = output.sequences
else:
output_sequences = output
# Reshape from 2D (bsz * channels, seq_len) to 3D (bsz, seq_len, channels)
num_channels = self.config.decoder_config.num_channels
bsz = output_sequences.shape[0] // num_channels
output_sequences = output_sequences.reshape(bsz, num_channels, -1).transpose(1, 2)
# Apply delay mask
output_sequences = self.apply_delay_mask(output_sequences, self.config.pad_token_id, delay_mask)
if return_dict_in_generate:
output.sequences = output_sequences
else:
output = output_sequences
return output
| transformers/src/transformers/models/dia/generation_dia.py/0 | {
"file_path": "transformers/src/transformers/models/dia/generation_dia.py",
"repo_id": "transformers",
"token_count": 8972
} | 459 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/dinov3_vit/modular_dinov3_vit.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_dinov3_vit.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Meta AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Callable, Optional
import numpy as np
import torch
from torch import nn
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...pytorch_utils import compile_compatible_method_lru_cache
from ...utils import TransformersKwargs, auto_docstring
from ...utils.generic import check_model_inputs
from .configuration_dinov3_vit import DINOv3ViTConfig
class DINOv3ViTEmbeddings(nn.Module):
"""
Construct the CLS token, mask token, register tokens and patch embeddings.
"""
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.config = config
self.cls_token = nn.Parameter(torch.randn(1, 1, config.hidden_size))
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.register_tokens = nn.Parameter(torch.empty(1, config.num_register_tokens, config.hidden_size))
self.patch_embeddings = nn.Conv2d(
config.num_channels, config.hidden_size, kernel_size=config.patch_size, stride=config.patch_size
)
def forward(self, pixel_values: torch.Tensor, bool_masked_pos: Optional[torch.Tensor] = None) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embeddings.weight.dtype
# (batch_size, num_channels, height, width) -> (batch_size, num_patches, hidden_size)
patch_embeddings = self.patch_embeddings(pixel_values.to(dtype=target_dtype))
patch_embeddings = patch_embeddings.flatten(2).transpose(1, 2)
if bool_masked_pos is not None:
mask_token = self.mask_token.to(patch_embeddings.dtype)
patch_embeddings = torch.where(bool_masked_pos.unsqueeze(-1), mask_token, patch_embeddings)
# Add CLS and register tokens
cls_token = self.cls_token.expand(batch_size, -1, -1)
register_tokens = self.register_tokens.expand(batch_size, -1, -1)
embeddings = torch.cat([cls_token, register_tokens, patch_embeddings], dim=1)
return embeddings
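# Illustrative sketch (editor's note): the resulting sequence is laid out as
# [CLS] + num_register_tokens register tokens + (H / patch_size) * (W / patch_size) patch tokens,
# e.g. a hypothetical 224 x 224 input with patch_size=16 and 4 register tokens yields 1 + 4 + 196 = 201 tokens.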
@compile_compatible_method_lru_cache(maxsize=32)
def get_patches_center_coordinates(
num_patches_h: int, num_patches_w: int, dtype: torch.dtype, device: torch.device
) -> torch.Tensor:
"""
Computes the 2D coordinates of the centers of image patches, normalized to the range [-1, +1].
The center of each patch is exactly halfway between its top-left and bottom-right corners.
Args:
num_patches_h (int): Number of patches along the vertical (height) axis.
num_patches_w (int): Number of patches along the horizontal (width) axis.
dtype (torch.dtype): The desired data type of the returned tensor.
device (torch.device): The device on which to create the returned tensor.
Returns:
torch.Tensor: A tensor of shape (height * width, 2), where each row contains the (y, x)
coordinates of a patch center, normalized to [-1, +1].
"""
coords_h = torch.arange(0.5, num_patches_h, dtype=dtype, device=device)
coords_w = torch.arange(0.5, num_patches_w, dtype=dtype, device=device)
coords_h = coords_h / num_patches_h
coords_w = coords_w / num_patches_w
# (height, width, 2) -> (height * width, 2)
coords = torch.stack(torch.meshgrid(coords_h, coords_w, indexing="ij"), dim=-1)
coords = coords.flatten(0, 1)
# Shift range [0, 1] to [-1, +1]
coords = 2.0 * coords - 1.0
return coords
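# Illustrative sketch (editor's note): for a 2 x 2 patch grid the centers land at +-0.5 on both axes.
# This mirrors what get_patches_center_coordinates computes, without the lru_cache wrapper.
#
#     >>> import torch
#     >>> axis = torch.arange(0.5, 2) / 2                     # [0.25, 0.75]
#     >>> coords = torch.stack(torch.meshgrid(axis, axis, indexing="ij"), dim=-1).flatten(0, 1)
#     >>> 2.0 * coords - 1.0
#     tensor([[-0.5000, -0.5000],
#             [-0.5000,  0.5000],
#             [ 0.5000, -0.5000],
#             [ 0.5000,  0.5000]])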
def augment_patches_center_coordinates(
coords: torch.Tensor,
shift: Optional[float] = None,
jitter: Optional[float] = None,
rescale: Optional[float] = None,
) -> torch.Tensor:
# Shift coords by adding a uniform value in [-shift, shift]
if shift is not None:
shift_hw = torch.empty((1, 2), device=coords.device, dtype=coords.dtype)
shift_hw = shift_hw.uniform_(-shift, shift)
coords = coords + shift_hw
# Jitter coords by multiplying the range [-1, 1] by a log-uniform value in [1/jitter, jitter]
if jitter is not None:
jitter_range = np.log(jitter)
jitter_hw = torch.empty((1, 2), device=coords.device, dtype=coords.dtype)
jitter_hw = jitter_hw.uniform_(-jitter_range, jitter_range).exp()
coords = coords * jitter_hw
# Rescale coords by multiplying the range [-1, 1] by a log-uniform value in [1/rescale, rescale]
if rescale is not None:
rescale_range = np.log(rescale)
rescale_hw = torch.empty(1, device=coords.device, dtype=coords.dtype)
rescale_hw = rescale_hw.uniform_(-rescale_range, rescale_range).exp()
coords = coords * rescale_hw
return coords
class DINOv3ViTRopePositionEmbedding(nn.Module):
inv_freq: torch.Tensor
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.config = config
self.base = config.rope_theta
self.head_dim = config.hidden_size // config.num_attention_heads
self.num_patches_h = config.image_size // config.patch_size
self.num_patches_w = config.image_size // config.patch_size
inv_freq = 1 / self.base ** torch.arange(0, 1, 4 / self.head_dim, dtype=torch.float32) # (head_dim / 4,)
self.register_buffer("inv_freq", inv_freq, persistent=False)
def forward(self, pixel_values: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
_, _, height, width = pixel_values.shape
num_patches_h = height // self.config.patch_size
num_patches_w = width // self.config.patch_size
device = pixel_values.device
device_type = device.type if isinstance(device.type, str) and device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
# Although we could precompute static patch_coords from image_size and patch_size in the config,
# the model was trained with random_scale, so it can process images of varying sizes.
# Therefore, it's better to compute patch_coords dynamically (with lru_cache).
patch_coords = get_patches_center_coordinates(
num_patches_h, num_patches_w, dtype=torch.float32, device=device
)
if self.training:
patch_coords = augment_patches_center_coordinates(
patch_coords,
shift=self.config.pos_embed_shift,
jitter=self.config.pos_embed_jitter,
rescale=self.config.pos_embed_rescale,
)
# (height * width, 2, head_dim / 4) -> (height * width, head_dim / 2) -> (height * width, head_dim)
angles = 2 * math.pi * patch_coords[:, :, None] * self.inv_freq[None, None, :]
angles = angles.flatten(1, 2)
angles = angles.tile(2)
cos = torch.cos(angles)
sin = torch.sin(angles)
dtype = pixel_values.dtype
return cos.to(dtype=dtype), sin.to(dtype=dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
# Normalize the attention scores to probabilities.
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
# Mask heads if we want to
if attention_mask is not None:
attn_weights = attn_weights * attention_mask
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def apply_rotary_pos_emb(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, **kwargs
) -> tuple[torch.Tensor, torch.Tensor]:
"""Applies Rotary Position Embedding to the query and key tensors, but only to the patch tokens,
ignoring the prefix tokens (cls token and register tokens).
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
num_tokens = q.shape[-2]
num_patches = sin.shape[-2]
num_prefix_tokens = num_tokens - num_patches # cls token + register tokens
q_prefix_tokens, q_patches = q.split((num_prefix_tokens, num_patches), dim=-2)
k_prefix_tokens, k_patches = k.split((num_prefix_tokens, num_patches), dim=-2)
# apply rope only to patch tokens
q_patches = (q_patches * cos) + (rotate_half(q_patches) * sin)
k_patches = (k_patches * cos) + (rotate_half(k_patches) * sin)
q = torch.cat((q_prefix_tokens, q_patches), dim=-2)
k = torch.cat((k_prefix_tokens, k_patches), dim=-2)
return q, k
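# Illustrative sketch (editor's note): rotate_half swaps and negates the two halves of the last dim,
# which is what makes the cos/sin mixing above equivalent to a rotation; the prefix tokens (cls and
# register tokens) are passed through apply_rotary_pos_emb untouched.
#
#     >>> import torch
#     >>> rotate_half(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
#     tensor([[-3., -4.,  1.,  2.]])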
class DINOv3ViTAttention(nn.Module):
"""
Multi-headed attention compatible with ALL_ATTENTION_FUNCTIONS.
"""
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
self.is_causal = False
self.scaling = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.key_bias)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.value_bias)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.query_bias)
self.o_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.proj_bias)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, patches, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, patches, self.num_heads, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, patches, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class DINOv3ViTLayerScale(nn.Module):
def __init__(self, config) -> None:
super().__init__()
self.lambda1 = nn.Parameter(config.layerscale_value * torch.ones(config.hidden_size))
def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
return hidden_state * self.lambda1
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
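# Illustrative sketch (editor's note): with drop_prob=0.5 and training=True roughly half of the samples
# in the batch are zeroed while the survivors are rescaled by 1 / keep_prob = 2, so the expected value
# of the output matches the input; at inference time (training=False) the input is returned unchanged.
#
#     >>> import torch
#     >>> x = torch.ones(4, 3)
#     >>> out = drop_path(x, drop_prob=0.5, training=True)   # rows are either all 0.0 or all 2.0 (random)
#     >>> drop_path(x, drop_prob=0.5, training=False) is x
#     True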
class DINOv3ViTDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class DINOv3ViTMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.up_proj(x)))
class DINOv3ViTGatedMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class DINOv3ViTLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the original implementation."""
def __init__(self, config: DINOv3ViTConfig):
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention = DINOv3ViTAttention(config)
self.layer_scale1 = DINOv3ViTLayerScale(config)
self.drop_path = DINOv3ViTDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
if config.use_gated_mlp:
self.mlp = DINOv3ViTGatedMLP(config)
else:
self.mlp = DINOv3ViTMLP(config)
self.layer_scale2 = DINOv3ViTLayerScale(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
) -> torch.Tensor:
# Attention with residual connection
residual = hidden_states
hidden_states = self.norm1(hidden_states)
hidden_states, _ = self.attention(
hidden_states,
attention_mask=attention_mask,
position_embeddings=position_embeddings,
)
hidden_states = self.layer_scale1(hidden_states)
hidden_states = self.drop_path(hidden_states) + residual
# MLP with residual connection
residual = hidden_states
hidden_states = self.norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.layer_scale2(hidden_states)
hidden_states = self.drop_path(hidden_states) + residual
return hidden_states
@auto_docstring
class DINOv3ViTPreTrainedModel(PreTrainedModel):
config: DINOv3ViTConfig
base_model_prefix = "dinov3_vit"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["DINOv3ViTLayer"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": DINOv3ViTLayer,
"attentions": DINOv3ViTAttention,
}
def _init_weights(self, module) -> None:
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
# `trunc_normal_cpu` not implemented in `half` issues
module.weight.data = nn.init.trunc_normal_(
module.weight.data.to(torch.float32),
mean=0.0,
std=self.config.initializer_range,
).to(module.weight.dtype)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, DINOv3ViTEmbeddings):
module.cls_token.data = nn.init.trunc_normal_(
module.cls_token.data.to(torch.float32),
mean=0.0,
std=self.config.initializer_range,
).to(module.cls_token.dtype)
if module.config.num_register_tokens > 0:
module.register_tokens.data = nn.init.trunc_normal_(
module.register_tokens.data.to(torch.float32),
mean=0.0,
std=self.config.initializer_range,
).to(module.register_tokens.dtype)
module.mask_token.data.zero_()
elif isinstance(module, DINOv3ViTLayerScale):
module.lambda1.data.fill_(self.config.layerscale_value)
@auto_docstring
class DINOv3ViTModel(DINOv3ViTPreTrainedModel):
def __init__(self, config: DINOv3ViTConfig):
super().__init__(config)
self.config = config
self.embeddings = DINOv3ViTEmbeddings(config)
self.rope_embeddings = DINOv3ViTRopePositionEmbedding(config)
self.layer = nn.ModuleList([DINOv3ViTLayer(config) for _ in range(config.num_hidden_layers)])
self.norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@check_model_inputs
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, sequence_length)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0). Only relevant for
pre-training.
"""
pixel_values = pixel_values.to(self.embeddings.patch_embeddings.weight.dtype)
hidden_states = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
position_embeddings = self.rope_embeddings(pixel_values)
for i, layer_module in enumerate(self.layer):
layer_head_mask = head_mask[i] if head_mask is not None else None
hidden_states = layer_module(
hidden_states,
attention_mask=layer_head_mask,
position_embeddings=position_embeddings,
)
sequence_output = self.norm(hidden_states)
pooled_output = sequence_output[:, 0, :]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
)
__all__ = ["DINOv3ViTModel", "DINOv3ViTPreTrainedModel"]
| transformers/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py/0 | {
"file_path": "transformers/src/transformers/models/dinov3_vit/modeling_dinov3_vit.py",
"repo_id": "transformers",
"token_count": 9715
} | 460 |
# coding=utf-8
# Copyright 2018 DPR Authors, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow DPR model for Open Domain Question Answering."""
from __future__ import annotations
from dataclasses import dataclass
import tensorflow as tf
from ...modeling_tf_outputs import TFBaseModelOutputWithPooling
from ...modeling_tf_utils import TFModelInputType, TFPreTrainedModel, get_initializer, keras, shape_list, unpack_inputs
from ...utils import (
ModelOutput,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ..bert.modeling_tf_bert import TFBertMainLayer
from .configuration_dpr import DPRConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "DPRConfig"
##########
# Outputs
##########
@dataclass
class TFDPRContextEncoderOutput(ModelOutput):
r"""
Class for outputs of [`TFDPRContextEncoder`].
Args:
pooler_output (`tf.Tensor` of shape `(batch_size, embeddings_size)`):
The DPR encoder outputs the *pooler_output* that corresponds to the context representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed contexts for nearest neighbors queries with questions embeddings.
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
pooler_output: tf.Tensor | None = None
hidden_states: tuple[tf.Tensor, ...] | None = None
attentions: tuple[tf.Tensor, ...] | None = None
@dataclass
class TFDPRQuestionEncoderOutput(ModelOutput):
"""
Class for outputs of [`TFDPRQuestionEncoder`].
Args:
pooler_output (`tf.Tensor` of shape `(batch_size, embeddings_size)`):
The DPR encoder outputs the *pooler_output* that corresponds to the question representation. Last layer
hidden-state of the first token of the sequence (classification token) further processed by a Linear layer.
This output is to be used to embed questions for nearest neighbors queries with context embeddings.
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
pooler_output: tf.Tensor | None = None
hidden_states: tuple[tf.Tensor, ...] | None = None
attentions: tuple[tf.Tensor, ...] | None = None
@dataclass
class TFDPRReaderOutput(ModelOutput):
"""
Class for outputs of [`TFDPRReader`].
Args:
start_logits (`tf.Tensor` of shape `(n_passages, sequence_length)`):
Logits of the start index of the span for each passage.
end_logits (`tf.Tensor` of shape `(n_passages, sequence_length)`):
Logits of the end index of the span for each passage.
relevance_logits (`tf.Tensor` of shape `(n_passages, )`):
Outputs of the QA classifier of the DPRReader that corresponds to the scores of each passage to answer the
question, compared to all the other passages.
hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
start_logits: tf.Tensor | None = None
end_logits: tf.Tensor | None = None
relevance_logits: tf.Tensor | None = None
hidden_states: tuple[tf.Tensor, ...] | None = None
attentions: tuple[tf.Tensor, ...] | None = None
class TFDPREncoderLayer(keras.layers.Layer):
base_model_prefix = "bert_model"
def __init__(self, config: DPRConfig, **kwargs):
super().__init__(**kwargs)
# resolve name conflict with TFBertMainLayer instead of TFBertModel
self.bert_model = TFBertMainLayer(config, add_pooling_layer=False, name="bert_model")
self.config = config
if self.config.hidden_size <= 0:
raise ValueError("Encoder hidden_size can't be zero")
self.projection_dim = config.projection_dim
if self.projection_dim > 0:
self.encode_proj = keras.layers.Dense(
config.projection_dim, kernel_initializer=get_initializer(config.initializer_range), name="encode_proj"
)
@unpack_inputs
def call(
self,
input_ids: tf.Tensor | None = None,
attention_mask: tf.Tensor | None = None,
token_type_ids: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool = False,
) -> TFBaseModelOutputWithPooling | tuple[tf.Tensor, ...]:
outputs = self.bert_model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
pooled_output = sequence_output[:, 0, :]
if self.projection_dim > 0:
pooled_output = self.encode_proj(pooled_output)
if not return_dict:
return (sequence_output, pooled_output) + outputs[1:]
return TFBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@property
def embeddings_size(self) -> int:
if self.projection_dim > 0:
return self.projection_dim
return self.bert_model.config.hidden_size
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "bert_model", None) is not None:
with tf.name_scope(self.bert_model.name):
self.bert_model.build(None)
if getattr(self, "encode_proj", None) is not None:
with tf.name_scope(self.encode_proj.name):
self.encode_proj.build(None)
class TFDPRSpanPredictorLayer(keras.layers.Layer):
base_model_prefix = "encoder"
def __init__(self, config: DPRConfig, **kwargs):
super().__init__(**kwargs)
self.config = config
self.encoder = TFDPREncoderLayer(config, name="encoder")
self.qa_outputs = keras.layers.Dense(
2, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
self.qa_classifier = keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="qa_classifier"
)
@unpack_inputs
def call(
self,
input_ids: tf.Tensor | None = None,
attention_mask: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = False,
training: bool = False,
) -> TFDPRReaderOutput | tuple[tf.Tensor, ...]:
# notations: N - number of questions in a batch, M - number of passages per question, L - sequence length
n_passages, sequence_length = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:2]
# feed encoder
outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = outputs[0]
# compute logits
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
relevance_logits = self.qa_classifier(sequence_output[:, 0, :])
# resize
start_logits = tf.reshape(start_logits, [n_passages, sequence_length])
end_logits = tf.reshape(end_logits, [n_passages, sequence_length])
relevance_logits = tf.reshape(relevance_logits, [n_passages])
if not return_dict:
return (start_logits, end_logits, relevance_logits) + outputs[2:]
return TFDPRReaderOutput(
start_logits=start_logits,
end_logits=end_logits,
relevance_logits=relevance_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "encoder", None) is not None:
with tf.name_scope(self.encoder.name):
self.encoder.build(None)
if getattr(self, "qa_outputs", None) is not None:
with tf.name_scope(self.qa_outputs.name):
self.qa_outputs.build([None, None, self.encoder.embeddings_size])
if getattr(self, "qa_classifier", None) is not None:
with tf.name_scope(self.qa_classifier.name):
self.qa_classifier.build([None, None, self.encoder.embeddings_size])
class TFDPRSpanPredictor(TFPreTrainedModel):
base_model_prefix = "encoder"
def __init__(self, config: DPRConfig, **kwargs):
super().__init__(config, **kwargs)
self.encoder = TFDPRSpanPredictorLayer(config)
@unpack_inputs
def call(
self,
input_ids: tf.Tensor | None = None,
attention_mask: tf.Tensor | None = None,
token_type_ids: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = False,
training: bool = False,
) -> TFDPRReaderOutput | tuple[tf.Tensor, ...]:
outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return outputs
class TFDPREncoder(TFPreTrainedModel):
base_model_prefix = "encoder"
def __init__(self, config: DPRConfig, **kwargs):
super().__init__(config, **kwargs)
self.encoder = TFDPREncoderLayer(config)
@unpack_inputs
def call(
self,
input_ids: tf.Tensor | None = None,
attention_mask: tf.Tensor | None = None,
token_type_ids: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool = False,
output_hidden_states: bool = False,
return_dict: bool = False,
training: bool = False,
) -> TFDPRReaderOutput | tuple[tf.Tensor, ...]:
outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return outputs
##################
# PreTrainedModel
##################
class TFDPRPretrainedContextEncoder(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DPRConfig
base_model_prefix = "ctx_encoder"
class TFDPRPretrainedQuestionEncoder(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DPRConfig
base_model_prefix = "question_encoder"
class TFDPRPretrainedReader(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = DPRConfig
base_model_prefix = "reader"
###############
# Actual Models
###############
TF_DPR_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to
general usage and behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Parameters:
config ([`DPRConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
TF_DPR_ENCODERS_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. To match pretraining, DPR input sequence should be
formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs (for a pair title+text for example):
```
tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
```
(b) For single sequences (for a question for example):
```
tokens: [CLS] the dog is hairy . [SEP]
token_type_ids: 0 0 0 0 0 0 0
```
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
TF_DPR_READER_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shapes `(n_passages, sequence_length)`):
Indices of input sequence tokens in the vocabulary. It has to be a sequence triplet with 1) the question
            and 2) the passage titles and 3) the passage texts. To match pretraining, DPR `input_ids` sequence should
be formatted with [CLS] and [SEP] with the format:
`[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>`
DPR is a model with absolute position embeddings so it's usually advised to pad the inputs on the right
rather than the left.
Indices can be obtained using [`DPRReaderTokenizer`]. See this class documentation for more details.
attention_mask (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
inputs_embeds (`Numpy array` or `tf.Tensor` of shape `(n_passages, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare DPRContextEncoder transformer outputting pooler outputs as context representations.",
TF_DPR_START_DOCSTRING,
)
class TFDPRContextEncoder(TFDPRPretrainedContextEncoder):
def __init__(self, config: DPRConfig, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.ctx_encoder = TFDPREncoderLayer(config, name="ctx_encoder")
def get_input_embeddings(self):
try:
return self.ctx_encoder.bert_model.get_input_embeddings()
except AttributeError:
self.build()
return self.ctx_encoder.bert_model.get_input_embeddings()
@unpack_inputs
@add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFDPRContextEncoderOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: tf.Tensor | None = None,
token_type_ids: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool = False,
) -> TFDPRContextEncoderOutput | tuple[tf.Tensor, ...]:
r"""
Return:
Examples:
```python
>>> from transformers import TFDPRContextEncoder, DPRContextEncoderTokenizer
>>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
>>> model = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", from_pt=True)
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="tf")["input_ids"]
>>> embeddings = model(input_ids).pooler_output
```
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = (
tf.ones(input_shape, dtype=tf.dtypes.int32)
if input_ids is None
else (input_ids != self.config.pad_token_id)
)
if token_type_ids is None:
token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32)
outputs = self.ctx_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
if not return_dict:
return outputs[1:]
return TFDPRContextEncoderOutput(
pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "ctx_encoder", None) is not None:
with tf.name_scope(self.ctx_encoder.name):
self.ctx_encoder.build(None)
@add_start_docstrings(
"The bare DPRQuestionEncoder transformer outputting pooler outputs as question representations.",
TF_DPR_START_DOCSTRING,
)
class TFDPRQuestionEncoder(TFDPRPretrainedQuestionEncoder):
def __init__(self, config: DPRConfig, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.question_encoder = TFDPREncoderLayer(config, name="question_encoder")
def get_input_embeddings(self):
try:
return self.question_encoder.bert_model.get_input_embeddings()
except AttributeError:
self.build()
return self.question_encoder.bert_model.get_input_embeddings()
@unpack_inputs
@add_start_docstrings_to_model_forward(TF_DPR_ENCODERS_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFDPRQuestionEncoderOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: tf.Tensor | None = None,
token_type_ids: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool = False,
) -> TFDPRQuestionEncoderOutput | tuple[tf.Tensor, ...]:
r"""
Return:
Examples:
```python
>>> from transformers import TFDPRQuestionEncoder, DPRQuestionEncoderTokenizer
>>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
>>> model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True)
>>> input_ids = tokenizer("Hello, is my dog cute ?", return_tensors="tf")["input_ids"]
>>> embeddings = model(input_ids).pooler_output
```
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = (
tf.ones(input_shape, dtype=tf.dtypes.int32)
if input_ids is None
else (input_ids != self.config.pad_token_id)
)
if token_type_ids is None:
token_type_ids = tf.zeros(input_shape, dtype=tf.dtypes.int32)
outputs = self.question_encoder(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
if not return_dict:
return outputs[1:]
return TFDPRQuestionEncoderOutput(
pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "question_encoder", None) is not None:
with tf.name_scope(self.question_encoder.name):
self.question_encoder.build(None)
@add_start_docstrings(
"The bare DPRReader transformer outputting span predictions.",
TF_DPR_START_DOCSTRING,
)
class TFDPRReader(TFDPRPretrainedReader):
def __init__(self, config: DPRConfig, *args, **kwargs):
super().__init__(config, *args, **kwargs)
self.span_predictor = TFDPRSpanPredictorLayer(config, name="span_predictor")
def get_input_embeddings(self):
try:
return self.span_predictor.encoder.bert_model.get_input_embeddings()
except AttributeError:
self.build()
return self.span_predictor.encoder.bert_model.get_input_embeddings()
@unpack_inputs
@add_start_docstrings_to_model_forward(TF_DPR_READER_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=TFDPRReaderOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids: TFModelInputType | None = None,
attention_mask: tf.Tensor | None = None,
inputs_embeds: tf.Tensor | None = None,
output_attentions: bool | None = None,
output_hidden_states: bool | None = None,
return_dict: bool | None = None,
training: bool = False,
) -> TFDPRReaderOutput | tuple[tf.Tensor, ...]:
r"""
Return:
Examples:
```python
>>> from transformers import TFDPRReader, DPRReaderTokenizer
>>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
>>> model = TFDPRReader.from_pretrained("facebook/dpr-reader-single-nq-base", from_pt=True)
>>> encoded_inputs = tokenizer(
... questions=["What is love ?"],
... titles=["Haddaway"],
... texts=["'What Is Love' is a song recorded by the artist Haddaway"],
... return_tensors="tf",
... )
>>> outputs = model(encoded_inputs)
>>> start_logits = outputs.start_logits
>>> end_logits = outputs.end_logits
>>> relevance_logits = outputs.relevance_logits
```
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if attention_mask is None:
attention_mask = tf.ones(input_shape, dtype=tf.dtypes.int32)
return self.span_predictor(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
def build(self, input_shape=None):
if self.built:
return
self.built = True
if getattr(self, "span_predictor", None) is not None:
with tf.name_scope(self.span_predictor.name):
self.span_predictor.build(None)
__all__ = [
"TFDPRContextEncoder",
"TFDPRPretrainedContextEncoder",
"TFDPRPretrainedQuestionEncoder",
"TFDPRPretrainedReader",
"TFDPRQuestionEncoder",
"TFDPRReader",
]
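if __name__ == "__main__":
    # Minimal retrieval sketch (illustrative only, not part of the library API above): DPR scores a
    # question/passage pair by the dot product of the two pooler outputs. The checkpoint names are the
    # same ones already used in the docstring examples of this file.
    from transformers import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer

    q_tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    ctx_tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
    q_encoder = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base", from_pt=True)
    ctx_encoder = TFDPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base", from_pt=True)

    question = q_tokenizer("What is love?", return_tensors="tf")
    passages = ctx_tokenizer(
        ["'What Is Love' is a song recorded by the artist Haddaway", "The dog is hairy."],
        padding=True,
        return_tensors="tf",
    )
    question_embedding = q_encoder(question["input_ids"]).pooler_output  # shape (1, hidden_size)
    passage_embeddings = ctx_encoder(passages["input_ids"]).pooler_output  # shape (2, hidden_size)
    scores = tf.matmul(question_embedding, passage_embeddings, transpose_b=True)  # (1, 2) similarities
    print(scores)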
| transformers/src/transformers/models/dpr/modeling_tf_dpr.py/0 | {
"file_path": "transformers/src/transformers/models/dpr/modeling_tf_dpr.py",
"repo_id": "transformers",
"token_count": 13776
} | 461 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
class EfficientLoFTRConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of an [`EfficientLoFTRForKeypointMatching`].
    It is used to instantiate an EfficientLoFTR model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the
EfficientLoFTR [zju-community/efficientloftr](https://huggingface.co/zju-community/efficientloftr) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
stage_num_blocks (`List`, *optional*, defaults to [1, 2, 4, 14]):
            The number of blocks in each stage.
out_features (`List`, *optional*, defaults to [64, 64, 128, 256]):
The number of channels in each stage
stage_stride (`List`, *optional*, defaults to [2, 1, 2, 2]):
The stride used in each stage
hidden_size (`int`, *optional*, defaults to 256):
The dimension of the descriptors.
activation_function (`str`, *optional*, defaults to `"relu"`):
The activation function used in the backbone
q_aggregation_kernel_size (`int`, *optional*, defaults to 4):
The kernel size of the aggregation of query states in the fusion network
kv_aggregation_kernel_size (`int`, *optional*, defaults to 4):
The kernel size of the aggregation of key and value states in the fusion network
q_aggregation_stride (`int`, *optional*, defaults to 4):
The stride of the aggregation of query states in the fusion network
kv_aggregation_stride (`int`, *optional*, defaults to 4):
The stride of the aggregation of key and value states in the fusion network
num_attention_layers (`int`, *optional*, defaults to 4):
Number of attention layers in the LocalFeatureTransformer
num_attention_heads (`int`, *optional*, defaults to 8):
The number of heads in the GNN layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during attention.
mlp_activation_function (`str`, *optional*, defaults to `"leaky_relu"`):
Activation function used in the attention mlp layer.
coarse_matching_skip_softmax (`bool`, *optional*, defaults to `False`):
Whether to skip softmax or not at the coarse matching step.
coarse_matching_threshold (`float`, *optional*, defaults to 0.2):
The threshold for the minimum score required for a match.
coarse_matching_temperature (`float`, *optional*, defaults to 0.1):
The temperature to apply to the coarse similarity matrix
coarse_matching_border_removal (`int`, *optional*, defaults to 2):
The size of the border to remove during coarse matching
fine_kernel_size (`int`, *optional*, defaults to 8):
Kernel size used for the fine feature matching
batch_norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the batch normalization layers.
embedding_size (`List`, *optional*, defaults to [15, 20]):
The size (height, width) of the embedding for the position embeddings.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
partial_rotary_factor (`float`, *optional*, defaults to 4.0):
            Dimension factor for the RoPE embeddings. In EfficientLoFTR, frequencies should be generated for
the whole hidden_size, so this factor is used to compensate.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3', '2d'], with 'default' being the original RoPE implementation.
`dim` (`int`): The dimension of the RoPE embeddings.
fine_matching_slice_dim (`int`, *optional*, defaults to 8):
The size of the slice used to divide the fine features for the first and second fine matching stages.
fine_matching_regress_temperature (`float`, *optional*, defaults to 10.0):
The temperature to apply to the fine similarity matrix
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Examples:
```python
>>> from transformers import EfficientLoFTRConfig, EfficientLoFTRForKeypointMatching
>>> # Initializing a EfficientLoFTR configuration
>>> configuration = EfficientLoFTRConfig()
>>> # Initializing a model from the EfficientLoFTR configuration
>>> model = EfficientLoFTRForKeypointMatching(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "efficientloftr"
def __init__(
self,
stage_num_blocks: Optional[list[int]] = None,
out_features: Optional[list[int]] = None,
stage_stride: Optional[list[int]] = None,
hidden_size: int = 256,
activation_function: str = "relu",
q_aggregation_kernel_size: int = 4,
kv_aggregation_kernel_size: int = 4,
q_aggregation_stride: int = 4,
kv_aggregation_stride: int = 4,
num_attention_layers: int = 4,
num_attention_heads: int = 8,
attention_dropout: float = 0.0,
attention_bias: bool = False,
mlp_activation_function: str = "leaky_relu",
coarse_matching_skip_softmax: bool = False,
coarse_matching_threshold: float = 0.2,
coarse_matching_temperature: float = 0.1,
coarse_matching_border_removal: int = 2,
fine_kernel_size: int = 8,
batch_norm_eps: float = 1e-5,
embedding_size: Optional[list[int]] = None,
rope_theta: float = 10000.0,
partial_rotary_factor: float = 4.0,
rope_scaling: Optional[dict] = None,
fine_matching_slice_dim: int = 8,
fine_matching_regress_temperature: float = 10.0,
initializer_range: float = 0.02,
**kwargs,
):
# Stage level of RepVGG
self.stage_num_blocks = stage_num_blocks if stage_num_blocks is not None else [1, 2, 4, 14]
self.stage_stride = stage_stride if stage_stride is not None else [2, 1, 2, 2]
self.out_features = out_features if out_features is not None else [64, 64, 128, 256]
self.stage_in_channels = [1] + self.out_features[:-1]
# Block level of RepVGG
self.stage_block_stride = [
[stride] + [1] * (num_blocks - 1) for stride, num_blocks in zip(self.stage_stride, self.stage_num_blocks)
]
self.stage_block_out_channels = [
[self.out_features[stage_idx]] * num_blocks for stage_idx, num_blocks in enumerate(self.stage_num_blocks)
]
self.stage_block_in_channels = [
[self.stage_in_channels[stage_idx]] + self.stage_block_out_channels[stage_idx][:-1]
for stage_idx in range(len(self.stage_num_blocks))
]
# Fine matching level of EfficientLoFTR
self.fine_fusion_dims = list(reversed(self.out_features))[:-1]
self.hidden_size = hidden_size
if self.hidden_size != self.out_features[-1]:
raise ValueError(
f"hidden_size should be equal to the last value in out_features. hidden_size = {self.hidden_size}, out_features = {self.stage_out_channels}"
)
self.activation_function = activation_function
self.q_aggregation_kernel_size = q_aggregation_kernel_size
self.kv_aggregation_kernel_size = kv_aggregation_kernel_size
self.q_aggregation_stride = q_aggregation_stride
self.kv_aggregation_stride = kv_aggregation_stride
self.num_attention_layers = num_attention_layers
self.num_attention_heads = num_attention_heads
self.attention_dropout = attention_dropout
self.attention_bias = attention_bias
self.intermediate_size = self.hidden_size * 2
self.mlp_activation_function = mlp_activation_function
self.coarse_matching_skip_softmax = coarse_matching_skip_softmax
self.coarse_matching_threshold = coarse_matching_threshold
self.coarse_matching_temperature = coarse_matching_temperature
self.coarse_matching_border_removal = coarse_matching_border_removal
self.fine_kernel_size = fine_kernel_size
self.batch_norm_eps = batch_norm_eps
self.fine_matching_slice_dim = fine_matching_slice_dim
self.fine_matching_regress_temperature = fine_matching_regress_temperature
self.num_key_value_heads = num_attention_heads
self.embedding_size = embedding_size if embedding_size is not None else [15, 20]
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling if rope_scaling is not None else {"rope_type": "default"}
# for compatibility with "default" rope type
self.partial_rotary_factor = partial_rotary_factor
rope_config_validation(self)
self.initializer_range = initializer_range
super().__init__(**kwargs)
__all__ = ["EfficientLoFTRConfig"]
| transformers/src/transformers/models/efficientloftr/configuration_efficientloftr.py/0 | {
"file_path": "transformers/src/transformers/models/efficientloftr/configuration_efficientloftr.py",
"repo_id": "transformers",
"token_count": 4109
} | 462 |
# Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
def _calculate_bin_centers(boundaries: torch.Tensor) -> torch.Tensor:
step = boundaries[1] - boundaries[0]
bin_centers = boundaries + step / 2
bin_centers = torch.cat([bin_centers, (bin_centers[-1] + step).unsqueeze(-1)], dim=0)
return bin_centers
def _calculate_expected_aligned_error(
alignment_confidence_breaks: torch.Tensor,
aligned_distance_error_probs: torch.Tensor,
) -> tuple[torch.Tensor, torch.Tensor]:
bin_centers = _calculate_bin_centers(alignment_confidence_breaks)
return (
torch.sum(aligned_distance_error_probs * bin_centers, dim=-1),
bin_centers[-1],
)
def compute_predicted_aligned_error(
logits: torch.Tensor,
max_bin: int = 31,
no_bins: int = 64,
**kwargs,
) -> dict[str, torch.Tensor]:
"""Computes aligned confidence metrics from logits.
Args:
logits: [*, num_res, num_res, num_bins] the logits output from
PredictedAlignedErrorHead.
max_bin: Maximum bin value
no_bins: Number of bins
Returns:
aligned_confidence_probs: [*, num_res, num_res, num_bins] the predicted
aligned error probabilities over bins for each residue pair.
predicted_aligned_error: [*, num_res, num_res] the expected aligned distance
error for each pair of residues.
max_predicted_aligned_error: [*] the maximum predicted error possible.
"""
boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
aligned_confidence_probs = torch.nn.functional.softmax(logits, dim=-1)
predicted_aligned_error, max_predicted_aligned_error = _calculate_expected_aligned_error(
alignment_confidence_breaks=boundaries,
aligned_distance_error_probs=aligned_confidence_probs,
)
return {
"aligned_confidence_probs": aligned_confidence_probs,
"predicted_aligned_error": predicted_aligned_error,
"max_predicted_aligned_error": max_predicted_aligned_error,
}
def compute_tm(
logits: torch.Tensor,
residue_weights: Optional[torch.Tensor] = None,
max_bin: int = 31,
no_bins: int = 64,
eps: float = 1e-8,
**kwargs,
) -> torch.Tensor:
if residue_weights is None:
residue_weights = logits.new_ones(logits.shape[-2])
boundaries = torch.linspace(0, max_bin, steps=(no_bins - 1), device=logits.device)
bin_centers = _calculate_bin_centers(boundaries)
    torch.sum(residue_weights)  # no-op: the result is not used
n = logits.shape[-2]
clipped_n = max(n, 19)
d0 = 1.24 * (clipped_n - 15) ** (1.0 / 3) - 1.8
probs = torch.nn.functional.softmax(logits, dim=-1)
tm_per_bin = 1.0 / (1 + (bin_centers**2) / (d0**2))
predicted_tm_term = torch.sum(probs * tm_per_bin, dim=-1)
normed_residue_mask = residue_weights / (eps + residue_weights.sum())
per_alignment = torch.sum(predicted_tm_term * normed_residue_mask, dim=-1)
weighted = per_alignment * residue_weights
argmax = (weighted == torch.max(weighted)).nonzero()[0]
return per_alignment[tuple(argmax)]
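if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): both helpers consume PAE-head logits of shape
    # [*, num_res, num_res, no_bins]; the sizes below are arbitrary.
    logits = torch.randn(50, 50, 64)

    outputs = compute_predicted_aligned_error(logits, max_bin=31, no_bins=64)
    print(outputs["aligned_confidence_probs"].shape)  # torch.Size([50, 50, 64])
    print(outputs["predicted_aligned_error"].shape)  # torch.Size([50, 50])
    print(float(outputs["max_predicted_aligned_error"]))  # centre of the last error bin

    ptm = compute_tm(logits, max_bin=31, no_bins=64)
    print(ptm.shape)  # torch.Size([]) -- a scalar pTM-style confidence score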
| transformers/src/transformers/models/esm/openfold_utils/loss.py/0 | {
"file_path": "transformers/src/transformers/models/esm/openfold_utils/loss.py",
"repo_id": "transformers",
"token_count": 1381
} | 463 |
# coding=utf-8
# Copyright 2023 the Falcon authors and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Falcon configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class FalconConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FalconModel`]. It is used to instantiate a Falcon
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the
[tiiuae/falcon-7b](https://huggingface.co/tiiuae/falcon-7b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 65024):
Vocabulary size of the Falcon model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`FalconModel`]
hidden_size (`int`, *optional*, defaults to 4544):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 71):
Number of attention heads for each attention layer in the Transformer encoder.
num_ln_in_parallel_attn (`int`, *optional*):
Set to 2 if separate layer norms are to be used for the MLP and the attention output when using parallel
            attention; otherwise, 1.
layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
The epsilon used by the layer normalization layers.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
use_cache (`bool`, *optional*, defaults to `True`):
Whether the model should return the last key/values attentions (not used by all models). Only relevant if
`config.is_decoder=True`.
hidden_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for MLP layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for attention layers.
num_kv_heads (`int`, *optional*):
Number of key-value heads to use per attention layer. If unset, defaults to the same value as
`num_attention_heads`.
alibi (`bool`, *optional*, defaults to `False`):
Whether to use ALiBi positional biases during self-attention.
new_decoder_architecture (`bool`, *optional*, defaults to `False`):
Whether to use the new (Falcon-40B) decoder architecture. If `True`, the `multi_query` and `parallel_attn`
arguments are ignored, as the new decoder always uses parallel attention.
multi_query (`bool`, *optional*, defaults to `True`):
Whether to use multi-query attention in the decoder. Ignored when `new_decoder_architecture` is `True`.
parallel_attn (`bool`, *optional*, defaults to `True`):
Whether to compute attention in parallel with the feedforward layer. If False, they are consecutive
instead, as in the original Transformer architecture. Ignored when `new_decoder_architecture` is `True`.
bias (`bool`, *optional*, defaults to `False`):
Whether to use bias on Linear layers.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with, when `alibi` is `False`. Pretrained
Falcon models with RoPE support up to 2048 tokens.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
                Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
bos_token_id (`int`, *optional*, defaults to 11):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 11):
The id of the "end-of-sequence" token.
ffn_hidden_size (`int`, *optional*):
The hidden size of the feedforward layer in the Transformer decoder.
            If not provided, defaults to 4 times the hidden size.
activation (`str`, *optional*, defaults to `"gelu"`):
The activation function used in the feedforward layer.
Example:
```python
>>> from transformers import FalconModel, FalconConfig
>>> # Initializing a small (2-layer) Falcon configuration
>>> configuration = FalconConfig(num_hidden_layers=2)
>>> # Initializing a model from the small configuration
>>> model = FalconModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "falcon"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=65024,
hidden_size=4544,
num_hidden_layers=32,
num_attention_heads=71,
num_ln_in_parallel_attn=None,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
use_cache=True,
hidden_dropout=0.0,
attention_dropout=0.0,
num_kv_heads=None,
alibi=False,
new_decoder_architecture=False,
multi_query=True,
parallel_attn=True,
bias=False,
max_position_embeddings=2048,
rope_theta=10000.0,
rope_scaling=None,
bos_token_id=11,
eos_token_id=11,
ffn_hidden_size=None,
activation="gelu",
**kwargs,
):
self.vocab_size = vocab_size
# Backward compatibility with n_embed kwarg
n_embed = kwargs.pop("n_embed", None)
self.hidden_size = hidden_size if n_embed is None else n_embed
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
self.alibi = alibi
self.new_decoder_architecture = new_decoder_architecture
self.multi_query = multi_query # Ignored when new_decoder_architecture is True
self.parallel_attn = parallel_attn
self.bias = bias
self.num_ln_in_parallel_attn = num_ln_in_parallel_attn
self.max_position_embeddings = max_position_embeddings
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.activation = activation
if ffn_hidden_size is None:
self.ffn_hidden_size = hidden_size * 4
else:
self.ffn_hidden_size = ffn_hidden_size
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@property
def head_dim(self):
return self.hidden_size // self.num_attention_heads
@property
def rotary(self):
return not self.alibi
__all__ = ["FalconConfig"]
| transformers/src/transformers/models/falcon/configuration_falcon.py/0 | {
"file_path": "transformers/src/transformers/models/falcon/configuration_falcon.py",
"repo_id": "transformers",
"token_count": 4312
} | 464 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert FastSpeech2Conformer checkpoint."""
import argparse
import torch
from transformers import (
FastSpeech2ConformerConfig,
FastSpeech2ConformerHifiGan,
FastSpeech2ConformerHifiGanConfig,
FastSpeech2ConformerModel,
FastSpeech2ConformerWithHifiGan,
FastSpeech2ConformerWithHifiGanConfig,
logging,
)
from .convert_fastspeech2_conformer_original_pytorch_checkpoint_to_pytorch import (
convert_espnet_state_dict_to_hf,
remap_model_yaml_config,
)
from .convert_hifigan import load_weights, remap_hifigan_yaml_config
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.FastSpeech2Conformer")
def convert_FastSpeech2ConformerWithHifiGan_checkpoint(
checkpoint_path,
yaml_config_path,
pytorch_dump_folder_path,
repo_id=None,
):
# Prepare the model
model_params, *_ = remap_model_yaml_config(yaml_config_path)
model_config = FastSpeech2ConformerConfig(**model_params)
model = FastSpeech2ConformerModel(model_config)
espnet_checkpoint = torch.load(checkpoint_path, weights_only=True)
hf_compatible_state_dict = convert_espnet_state_dict_to_hf(espnet_checkpoint)
model.load_state_dict(hf_compatible_state_dict)
# Prepare the vocoder
config_kwargs = remap_hifigan_yaml_config(yaml_config_path)
vocoder_config = FastSpeech2ConformerHifiGanConfig(**config_kwargs)
vocoder = FastSpeech2ConformerHifiGan(vocoder_config)
load_weights(espnet_checkpoint, vocoder, vocoder_config)
# Prepare the model + vocoder
config = FastSpeech2ConformerWithHifiGanConfig.from_sub_model_configs(model_config, vocoder_config)
with_hifigan_model = FastSpeech2ConformerWithHifiGan(config)
with_hifigan_model.model = model
with_hifigan_model.vocoder = vocoder
with_hifigan_model.save_pretrained(pytorch_dump_folder_path)
if repo_id:
print("Pushing to the hub...")
with_hifigan_model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument(
"--yaml_config_path", required=True, default=None, type=str, help="Path to config.yaml of model to convert"
)
parser.add_argument(
"--pytorch_dump_folder_path",
required=True,
default=None,
type=str,
help="Path to the output `FastSpeech2ConformerModel` PyTorch model.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_FastSpeech2ConformerWithHifiGan_checkpoint(
args.checkpoint_path,
args.yaml_config_path,
args.pytorch_dump_folder_path,
args.push_to_hub,
)
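# Example invocation (the paths and repo id below are placeholders, not real files):
#
#   python convert_model_with_hifigan.py \
#     --checkpoint_path /path/to/espnet/train.loss.ave.pth \
#     --yaml_config_path /path/to/espnet/config.yaml \
#     --pytorch_dump_folder_path ./fastspeech2-conformer-with-hifigan \
#     --push_to_hub <username>/fastspeech2-conformer-with-hifigan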
| transformers/src/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py/0 | {
"file_path": "transformers/src/transformers/models/fastspeech2_conformer/convert_model_with_hifigan.py",
"repo_id": "transformers",
"token_count": 1305
} | 465 |
# coding=utf-8
# Copyright 2022 Meta Platforms authors and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for FLAVA
"""
import warnings
from typing import Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
r"""
Constructs a FLAVA processor which wraps a FLAVA image processor and a FLAVA tokenizer into a single processor.
[`FlavaProcessor`] offers all the functionalities of [`FlavaImageProcessor`] and [`BertTokenizerFast`]. See the
[`~FlavaProcessor.__call__`] and [`~FlavaProcessor.decode`] for more information.
Args:
image_processor ([`FlavaImageProcessor`], *optional*): The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*): The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "FlavaImageProcessor"
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(
self,
images: Optional[ImageInput] = None,
text: Optional[Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]] = None,
add_special_tokens: bool = True,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = False,
max_length: Optional[int] = None,
stride: int = 0,
pad_to_multiple_of: Optional[int] = None,
return_image_mask: Optional[bool] = None,
return_codebook_pixels: Optional[bool] = None,
return_token_type_ids: Optional[bool] = None,
return_attention_mask: Optional[bool] = None,
return_overflowing_tokens: bool = False,
return_special_tokens_mask: bool = False,
return_offsets_mapping: bool = False,
return_length: bool = False,
verbose: bool = True,
return_tensors: Optional[Union[str, TensorType]] = None,
**kwargs,
):
"""
This method uses [`FlavaImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Please refer to the docstring of the above two methods for more information.
"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
encoding = self.tokenizer(
text=text,
add_special_tokens=add_special_tokens,
padding=padding,
truncation=truncation,
max_length=max_length,
stride=stride,
pad_to_multiple_of=pad_to_multiple_of,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask,
return_offsets_mapping=return_offsets_mapping,
return_length=return_length,
verbose=verbose,
return_tensors=return_tensors,
**kwargs,
)
if images is not None:
image_features = self.image_processor(
images,
return_image_mask=return_image_mask,
return_codebook_pixels=return_codebook_pixels,
return_tensors=return_tensors,
**kwargs,
)
if text is not None and images is not None:
encoding.update(image_features)
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class
@property
def feature_extractor(self):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
FutureWarning,
)
return self.image_processor
__all__ = ["FlavaProcessor"]
| transformers/src/transformers/models/flava/processing_flava.py/0 | {
"file_path": "transformers/src/transformers/models/flava/processing_flava.py",
"repo_id": "transformers",
"token_count": 2446
} | 466 |
# coding=utf-8
# Copyright 2023 Microsoft Research and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch FocalNet model."""
import collections.abc
import math
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import ModelOutput, auto_docstring, logging
from ...utils.backbone_utils import BackboneMixin
from .configuration_focalnet import FocalNetConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
FocalNet encoder's outputs, with potential hidden states.
"""
)
class FocalNetEncoderOutput(ModelOutput):
r"""
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
FocalNet model's outputs that also contains a pooling of the last hidden states.
"""
)
class FocalNetModelOutput(ModelOutput):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when `add_pooling_layer=True` is passed):
Average pooling of the last layer hidden-state.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
FocalNet masked image model outputs.
"""
)
class FocalNetMaskedImageModelingOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `bool_masked_pos` is provided):
Masked image modeling (MLM) loss.
reconstruction (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Reconstructed pixel values.
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
reconstruction: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
FocalNet outputs for image classification.
"""
)
class FocalNetImageClassifierOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
shape `(batch_size, hidden_size, height, width)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
include the spatial dimensions.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
reshaped_hidden_states: Optional[tuple[torch.FloatTensor]] = None
class FocalNetEmbeddings(nn.Module):
"""
Construct the patch embeddings and layernorm. Optionally, also the mask token.
"""
def __init__(self, config, use_mask_token=False):
super().__init__()
self.patch_embeddings = FocalNetPatchEmbeddings(
config=config,
image_size=config.image_size,
patch_size=config.patch_size,
num_channels=config.num_channels,
embed_dim=config.embed_dim,
use_conv_embed=config.use_conv_embed,
is_stem=True,
)
self.patch_grid = self.patch_embeddings.grid_size
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.embed_dim)) if use_mask_token else None
self.norm = nn.LayerNorm(config.embed_dim, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self, pixel_values: Optional[torch.FloatTensor], bool_masked_pos: Optional[torch.BoolTensor] = None
) -> tuple[torch.Tensor]:
embeddings, output_dimensions = self.patch_embeddings(pixel_values)
embeddings = self.norm(embeddings)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
mask = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1.0 - mask) + mask_tokens * mask
embeddings = self.dropout(embeddings)
return embeddings, output_dimensions
class FocalNetPatchEmbeddings(nn.Module):
def __init__(
self,
config,
image_size,
patch_size,
num_channels,
embed_dim,
add_norm=False,
use_conv_embed=False,
is_stem=False,
):
super().__init__()
image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.grid_size = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
if use_conv_embed:
# if we choose to use conv embedding, then we treat the stem and non-stem differently
if is_stem:
kernel_size = 7
padding = 2
stride = 4
else:
kernel_size = 3
padding = 1
stride = 2
self.projection = nn.Conv2d(
num_channels, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
)
else:
self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
if add_norm:
self.norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
else:
self.norm = None
def maybe_pad(self, pixel_values, height, width):
if width % self.patch_size[1] != 0:
pad_values = (0, self.patch_size[1] - width % self.patch_size[1])
pixel_values = nn.functional.pad(pixel_values, pad_values)
if height % self.patch_size[0] != 0:
pad_values = (0, 0, 0, self.patch_size[0] - height % self.patch_size[0])
pixel_values = nn.functional.pad(pixel_values, pad_values)
return pixel_values
def forward(self, pixel_values: Optional[torch.FloatTensor]) -> tuple[torch.Tensor, tuple[int]]:
_, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
# pad the input to be divisible by self.patch_size, if needed
pixel_values = self.maybe_pad(pixel_values, height, width)
embeddings = self.projection(pixel_values)
_, _, height, width = embeddings.shape
output_dimensions = (height, width)
embeddings = embeddings.flatten(2).transpose(1, 2)
if self.norm is not None:
embeddings = self.norm(embeddings)
return embeddings, output_dimensions
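# Illustrative sketch (hypothetical helper, not from the library): a minimal shape check
# for the patch embedding layer, assuming the default `FocalNetConfig` (224x224 images,
# patch size 4, embed_dim 96). A non-overlapping 4x4 projection turns a 224x224 image
# into a 56x56 grid, i.e. 3136 tokens.
def _focalnet_patch_embeddings_sketch():
    import torch

    config = FocalNetConfig()
    patch_embeddings = FocalNetPatchEmbeddings(
        config=config,
        image_size=config.image_size,
        patch_size=config.patch_size,
        num_channels=config.num_channels,
        embed_dim=config.embed_dim,
    )
    pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
    embeddings, (height, width) = patch_embeddings(pixel_values)
    assert (height, width) == (56, 56)
    assert embeddings.shape == (1, 56 * 56, config.embed_dim)
    return embeddings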
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
argument.
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
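# Illustrative sketch (hypothetical helper, not from the library): stochastic depth either
# zeroes an entire sample or rescales it by 1 / keep_prob, so the expected value of the
# output matches the input; at evaluation time it is the identity.
def _drop_path_sketch():
    import torch

    x = torch.ones(4, 3, 8, 8)
    assert torch.equal(drop_path(x, drop_prob=0.5, training=False), x)  # identity in eval mode
    out = drop_path(x, drop_prob=0.5, training=True)
    # Each sample is either all zeros (dropped) or all 1 / 0.5 = 2.0 (kept and rescaled).
    assert set(out.flatten(1).amax(dim=1).tolist()) <= {0.0, 2.0}
    return out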
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->FocalNet
class FocalNetDropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: Optional[float] = None) -> None:
super().__init__()
self.drop_prob = drop_prob
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
return drop_path(hidden_states, self.drop_prob, self.training)
def extra_repr(self) -> str:
return f"p={self.drop_prob}"
class FocalNetModulation(nn.Module):
def __init__(self, config, index, dim, focal_factor=2, bias=True, projection_dropout=0.0):
super().__init__()
self.dim = dim
self.focal_window = config.focal_windows[index]
self.focal_level = config.focal_levels[index]
self.focal_factor = focal_factor
self.use_post_layernorm_in_modulation = config.use_post_layernorm_in_modulation
self.normalize_modulator = config.normalize_modulator
self.projection_in = nn.Linear(dim, 2 * dim + (self.focal_level + 1), bias=bias)
self.projection_context = nn.Conv2d(dim, dim, kernel_size=1, stride=1, bias=bias)
self.activation = nn.GELU()
self.projection_out = nn.Linear(dim, dim)
self.projection_dropout = nn.Dropout(projection_dropout)
self.focal_layers = nn.ModuleList()
self.kernel_sizes = []
for k in range(self.focal_level):
kernel_size = self.focal_factor * k + self.focal_window
self.focal_layers.append(
nn.Sequential(
nn.Conv2d(
dim, dim, kernel_size=kernel_size, stride=1, groups=dim, padding=kernel_size // 2, bias=False
),
nn.GELU(),
)
)
self.kernel_sizes.append(kernel_size)
if self.use_post_layernorm_in_modulation:
self.layernorm = nn.LayerNorm(dim, eps=config.layer_norm_eps)
def forward(self, hidden_state):
"""
Args:
hidden_state:
Input features with shape of (batch_size, height, width, num_channels)
"""
num_channels = hidden_state.shape[-1]
# pre linear projection
x = self.projection_in(hidden_state).permute(0, 3, 1, 2).contiguous()
q, ctx, gates = torch.split(x, (num_channels, num_channels, self.focal_level + 1), 1)
# context aggregation
ctx_all = 0
for level in range(self.focal_level):
ctx = self.focal_layers[level](ctx)
ctx_all = ctx_all + ctx * gates[:, level : level + 1]
ctx_global = self.activation(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
ctx_all = ctx_all + ctx_global * gates[:, self.focal_level :]
# normalize context
if self.normalize_modulator:
ctx_all = ctx_all / (self.focal_level + 1)
# focal modulation
modulator = self.projection_context(ctx_all)
x_out = q * modulator
x_out = x_out.permute(0, 2, 3, 1).contiguous()
if self.use_post_layernorm_in_modulation:
x_out = self.layernorm(x_out)
# post linear projection
x_out = self.projection_out(x_out)
x_out = self.projection_dropout(x_out)
return x_out
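# Illustrative sketch (hypothetical helper, not from the library): the modulation block is
# shape-preserving on (batch, height, width, channels) inputs. Assumes the default
# `FocalNetConfig` (focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3]), so the two
# depthwise convolutions use kernel sizes 3 and 5 with "same" padding.
def _focal_modulation_sketch():
    import torch

    config = FocalNetConfig()
    modulation = FocalNetModulation(config=config, index=0, dim=96)
    hidden_state = torch.randn(1, 56, 56, 96)
    assert modulation(hidden_state).shape == (1, 56, 56, 96)
    return modulation.kernel_sizes  # [3, 5] under the assumed config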
class FocalNetMlp(nn.Module):
def __init__(self, config, in_features, hidden_features=None, out_features=None, drop=0.0):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.activation = ACT2FN[config.hidden_act]
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, hidden_state):
hidden_state = self.fc1(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.drop(hidden_state)
hidden_state = self.fc2(hidden_state)
hidden_state = self.drop(hidden_state)
return hidden_state
class FocalNetLayer(nn.Module):
r"""Focal Modulation Network layer (block).
Args:
config (`FocalNetConfig`):
Model config.
index (`int`):
Layer index.
dim (`int`):
Number of input channels.
input_resolution (`tuple[int]`):
Input resolution.
drop_path (`float`, *optional*, defaults to 0.0):
Stochastic depth rate.
"""
def __init__(self, config, index, dim, input_resolution, drop_path=0.0):
super().__init__()
self.config = config
# layer-specific attributes
self.dim = dim
self.input_resolution = input_resolution
# general attributes
self.drop = config.hidden_dropout_prob
self.use_post_layernorm = config.use_post_layernorm
self.norm1 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
self.modulation = FocalNetModulation(
config=config,
index=index,
dim=dim,
projection_dropout=self.drop,
)
self.drop_path = FocalNetDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = nn.LayerNorm(dim, eps=config.layer_norm_eps)
mlp_hidden_dim = int(dim * config.mlp_ratio)
self.mlp = FocalNetMlp(config=config, in_features=dim, hidden_features=mlp_hidden_dim, drop=self.drop)
self.gamma_1 = 1.0
self.gamma_2 = 1.0
if config.use_layerscale:
self.gamma_1 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True)
self.gamma_2 = nn.Parameter(config.layerscale_value * torch.ones(dim), requires_grad=True)
def forward(self, hidden_state, input_dimensions):
height, width = input_dimensions
batch_size, _, num_channels = hidden_state.shape
shortcut = hidden_state
# Focal Modulation
hidden_state = hidden_state if self.use_post_layernorm else self.norm1(hidden_state)
hidden_state = hidden_state.view(batch_size, height, width, num_channels)
hidden_state = self.modulation(hidden_state).view(batch_size, height * width, num_channels)
hidden_state = hidden_state if not self.use_post_layernorm else self.norm1(hidden_state)
# FFN
hidden_state = shortcut + self.drop_path(self.gamma_1 * hidden_state)
hidden_state = hidden_state + self.drop_path(
self.gamma_2
* (self.norm2(self.mlp(hidden_state)) if self.use_post_layernorm else self.mlp(self.norm2(hidden_state)))
)
return hidden_state
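# Illustrative sketch (hypothetical helper, not from the library): a FocalNet block maps a
# (batch, height * width, dim) token sequence to the same shape, applying pre-norm focal
# modulation and an MLP through two residual connections (default config assumed).
def _focalnet_layer_sketch():
    import torch

    config = FocalNetConfig()
    layer = FocalNetLayer(config=config, index=0, dim=96, input_resolution=(56, 56))
    tokens = torch.randn(1, 56 * 56, 96)
    assert layer(tokens, (56, 56)).shape == (1, 56 * 56, 96)
    return tokens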
class FocalNetStage(GradientCheckpointingLayer):
def __init__(self, config, index, input_resolution):
super().__init__()
self.config = config
self.num_stages = len(config.depths)
embed_dim = [config.embed_dim * (2**i) for i in range(self.num_stages)]
dim = embed_dim[index]
out_dim = embed_dim[index + 1] if (index < self.num_stages - 1) else None
downsample = FocalNetPatchEmbeddings if (index < self.num_stages - 1) else None
# stochastic depth decay rule
dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths), device="cpu")]
drop_path = dpr[sum(config.depths[:index]) : sum(config.depths[: index + 1])]
self.layers = nn.ModuleList(
[
FocalNetLayer(
config=config,
index=index,
dim=dim,
input_resolution=input_resolution,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
)
for i in range(config.depths[index])
]
)
if downsample is not None:
self.downsample = downsample(
config=config,
image_size=input_resolution,
patch_size=2,
num_channels=dim,
embed_dim=out_dim,
add_norm=True,
use_conv_embed=config.use_conv_embed,
is_stem=False,
)
else:
self.downsample = None
self.pointing = False
def forward(self, hidden_states: torch.Tensor, input_dimensions: tuple[int, int]) -> tuple[torch.Tensor]:
height, width = input_dimensions
for layer_module in self.layers:
hidden_states = layer_module(hidden_states, input_dimensions)
hidden_states_before_downsampling = hidden_states
if self.downsample is not None:
height, width = input_dimensions
hidden_states = hidden_states.transpose(1, 2).reshape(
hidden_states_before_downsampling.shape[0], -1, height, width
)
hidden_states, output_dimensions = self.downsample(hidden_states)
else:
output_dimensions = (height, width, height, width)
stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)
return stage_outputs
class FocalNetEncoder(nn.Module):
def __init__(self, config, grid_size):
super().__init__()
self.num_stages = len(config.depths)
self.config = config
self.stages = nn.ModuleList(
[
FocalNetStage(
config=config,
index=i_layer,
input_resolution=(grid_size[0] // (2**i_layer), grid_size[1] // (2**i_layer)),
)
for i_layer in range(self.num_stages)
]
)
self.gradient_checkpointing = False
def forward(
self,
hidden_states: torch.Tensor,
input_dimensions: tuple[int, int],
output_hidden_states: Optional[bool] = False,
output_hidden_states_before_downsampling: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, FocalNetEncoderOutput]:
all_hidden_states = () if output_hidden_states else None
all_reshaped_hidden_states = () if output_hidden_states else None
if output_hidden_states:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
for i, stage_module in enumerate(self.stages):
stage_outputs = stage_module(hidden_states, input_dimensions)
hidden_states = stage_outputs[0]
hidden_states_before_downsampling = stage_outputs[1]
output_dimensions = stage_outputs[2]
input_dimensions = (output_dimensions[-2], output_dimensions[-1])
if output_hidden_states and output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states_before_downsampling.shape
# rearrange b (h w) c -> b c h w
# here we use the original (not downsampled) height and width
reshaped_hidden_state = hidden_states_before_downsampling.view(
batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states_before_downsampling,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
elif output_hidden_states and not output_hidden_states_before_downsampling:
batch_size, _, hidden_size = hidden_states.shape
# rearrange b (h w) c -> b c h w
reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
all_hidden_states += (hidden_states,)
all_reshaped_hidden_states += (reshaped_hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
return FocalNetEncoderOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
reshaped_hidden_states=all_reshaped_hidden_states,
)
@auto_docstring
class FocalNetPreTrainedModel(PreTrainedModel):
config: FocalNetConfig
base_model_prefix = "focalnet"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["FocalNetStage"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, FocalNetEmbeddings):
if module.mask_token is not None:
module.mask_token.data.zero_()
elif isinstance(module, FocalNetLayer):
if self.config.use_layerscale:
module.gamma_1.data.fill_(self.config.layerscale_value)
module.gamma_2.data.fill_(self.config.layerscale_value)
@auto_docstring
class FocalNetModel(FocalNetPreTrainedModel):
def __init__(self, config, add_pooling_layer=True, use_mask_token=False):
r"""
        add_pooling_layer (`bool`, *optional*, defaults to `True`):
Whether to add a pooling layer
use_mask_token (`bool`, *optional*, defaults to `False`):
Whether to use a mask token for masked image modeling.
"""
super().__init__(config)
self.config = config
self.num_stages = len(config.depths)
self.num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
self.embeddings = FocalNetEmbeddings(config, use_mask_token=use_mask_token)
self.encoder = FocalNetEncoder(config, self.embeddings.patch_grid)
self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, FocalNetModelOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output, input_dimensions = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs = self.encoder(
embedding_output,
input_dimensions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
pooled_output = None
if self.pooler is not None:
pooled_output = self.pooler(sequence_output.transpose(1, 2))
pooled_output = torch.flatten(pooled_output, 1)
if not return_dict:
output = (sequence_output, pooled_output) + encoder_outputs[1:]
return output
return FocalNetModelOutput(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
)
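# Illustrative sketch (hypothetical helper, not from the library): a forward pass through a
# randomly initialized FocalNetModel with the default (FocalNet-tiny-like) config. Three of
# the four stages downsample, so the grid shrinks 56 -> 28 -> 14 -> 7 and the final
# sequence has 7 * 7 = 49 tokens of width embed_dim * 2**3 = 768.
def _focalnet_model_sketch():
    import torch

    config = FocalNetConfig()
    model = FocalNetModel(config)
    pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
    with torch.no_grad():
        outputs = model(pixel_values)
    assert outputs.last_hidden_state.shape == (1, 49, 768)
    assert outputs.pooler_output.shape == (1, 768)
    return outputs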
@auto_docstring(
custom_intro="""
FocalNet Model with a decoder on top for masked image modeling.
This follows the same implementation as in [SimMIM](https://huggingface.co/papers/2111.09886).
<Tip>
Note that we provide a script to pre-train this model on custom data in our [examples
directory](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining).
</Tip>
"""
)
class FocalNetForMaskedImageModeling(FocalNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.focalnet = FocalNetModel(config, add_pooling_layer=False, use_mask_token=True)
self.num_stages = len(config.depths)
num_features = int(config.embed_dim * 2 ** (self.num_stages - 1))
self.decoder = nn.Sequential(
nn.Conv2d(
in_channels=num_features, out_channels=config.encoder_stride**2 * config.num_channels, kernel_size=1
),
nn.PixelShuffle(config.encoder_stride),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, FocalNetMaskedImageModelingOutput]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
Examples:
```python
>>> from transformers import AutoImageProcessor, FocalNetConfig, FocalNetForMaskedImageModeling
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-base-simmim-window6-192")
>>> config = FocalNetConfig()
>>> model = FocalNetForMaskedImageModeling(config)
>>> num_patches = (model.config.image_size // model.config.patch_size) ** 2
>>> pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
>>> # create random boolean mask of shape (batch_size, num_patches)
>>> bool_masked_pos = torch.randint(low=0, high=2, size=(1, num_patches)).bool()
>>> outputs = model(pixel_values, bool_masked_pos=bool_masked_pos)
        >>> loss, reconstructed_pixel_values = outputs.loss, outputs.reconstruction
>>> list(reconstructed_pixel_values.shape)
[1, 3, 192, 192]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.focalnet(
pixel_values,
bool_masked_pos=bool_masked_pos,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
# Reshape to (batch_size, num_channels, height, width)
sequence_output = sequence_output.transpose(1, 2)
batch_size, num_channels, sequence_length = sequence_output.shape
height = width = math.floor(sequence_length**0.5)
sequence_output = sequence_output.reshape(batch_size, num_channels, height, width)
# Reconstruct pixel values
reconstructed_pixel_values = self.decoder(sequence_output)
masked_im_loss = None
if bool_masked_pos is not None:
size = self.config.image_size // self.config.patch_size
bool_masked_pos = bool_masked_pos.reshape(-1, size, size)
mask = (
bool_masked_pos.repeat_interleave(self.config.patch_size, 1)
.repeat_interleave(self.config.patch_size, 2)
.unsqueeze(1)
.contiguous()
)
reconstruction_loss = nn.functional.l1_loss(pixel_values, reconstructed_pixel_values, reduction="none")
masked_im_loss = (reconstruction_loss * mask).sum() / (mask.sum() + 1e-5) / self.config.num_channels
if not return_dict:
output = (reconstructed_pixel_values,) + outputs[2:]
return ((masked_im_loss,) + output) if masked_im_loss is not None else output
return FocalNetMaskedImageModelingOutput(
loss=masked_im_loss,
reconstruction=reconstructed_pixel_values,
hidden_states=outputs.hidden_states,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
@auto_docstring(
custom_intro="""
FocalNet Model with an image classification head on top (a linear layer on top of the pooled output) e.g. for
ImageNet.
"""
)
class FocalNetForImageClassification(FocalNetPreTrainedModel):
# Copied from transformers.models.swin.modeling_swin.SwinForImageClassification.__init__ with Swin->FocalNet, swin->focalnet
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.focalnet = FocalNetModel(config)
# Classifier head
self.classifier = (
nn.Linear(self.focalnet.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, FocalNetImageClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.focalnet(
pixel_values,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return FocalNetImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
reshaped_hidden_states=outputs.reshaped_hidden_states,
)
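# Illustrative sketch (hypothetical helper, not from the library): minimal usage of the
# classification head with a randomly initialized default config; `num_labels` falls back
# to the `PretrainedConfig` default of 2 when not set explicitly.
def _focalnet_classification_sketch():
    import torch

    config = FocalNetConfig()
    model = FocalNetForImageClassification(config)
    pixel_values = torch.randn(1, config.num_channels, config.image_size, config.image_size)
    with torch.no_grad():
        outputs = model(pixel_values)
    assert outputs.logits.shape == (1, config.num_labels)
    return outputs.logits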
@auto_docstring(
custom_intro="""
FocalNet backbone, to be used with frameworks like X-Decoder.
"""
)
class FocalNetBackbone(FocalNetPreTrainedModel, BackboneMixin):
def __init__(self, config: FocalNetConfig):
super().__init__(config)
super()._init_backbone(config)
self.num_features = [config.embed_dim] + config.hidden_sizes
self.focalnet = FocalNetModel(config)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> model = AutoBackbone.from_pretrained("microsoft/focalnet-tiny-lrf")
>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.focalnet(pixel_values, output_hidden_states=True, return_dict=True)
hidden_states = outputs.reshaped_hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
__all__ = [
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
| transformers/src/transformers/models/focalnet/modeling_focalnet.py/0 | {
"file_path": "transformers/src/transformers/models/focalnet/modeling_focalnet.py",
"repo_id": "transformers",
"token_count": 16586
} | 467 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for Fuyu."""
import math
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
pad,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
infer_channel_dimension_format,
is_scaled_image,
is_valid_image,
make_list_of_images,
to_numpy_array,
validate_preprocess_arguments,
)
from ...utils import (
TensorType,
filter_out_non_signature_kwargs,
is_torch_available,
is_torch_device,
is_torch_dtype,
logging,
requires_backends,
)
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
def make_list_of_list_of_images(
images: Union[list[list[ImageInput]], list[ImageInput], ImageInput],
) -> list[list[ImageInput]]:
if is_valid_image(images):
return [[images]]
if isinstance(images, list) and all(isinstance(image, list) for image in images):
return images
if isinstance(images, list):
return [make_list_of_images(image) for image in images]
raise ValueError("images must be a list of list of images or a list of images or an image.")
class FuyuBatchFeature(BatchFeature):
"""
BatchFeature class for Fuyu image processor and processor.
The outputs dictionary from the processors contains a mix of tensors and lists of tensors.
"""
def convert_to_tensors(self, tensor_type: Optional[Union[str, TensorType]] = None):
"""
Convert the inner content to tensors.
Args:
tensor_type (`str` or [`~utils.TensorType`], *optional*):
The type of tensors to use. If `str`, should be one of the values of the enum [`~utils.TensorType`]. If
`None`, no modification is done.
"""
if tensor_type is None:
return self
is_tensor, as_tensor = self._get_is_as_tensor_fns(tensor_type=tensor_type)
def _convert_tensor(elem):
if is_tensor(elem):
return elem
return as_tensor(elem)
def _safe_convert_tensor(elem):
try:
return _convert_tensor(elem)
except: # noqa E722
if key == "overflowing_values":
raise ValueError("Unable to create tensor returning overflowing values of different lengths. ")
raise ValueError(
"Unable to create tensor, you should probably activate padding "
"with 'padding=True' to have batched tensors with the same length."
)
# Do the tensor conversion in batch
for key, value in self.items():
if isinstance(value, list) and isinstance(value[0], list):
# list[list[Any]] -> list[list[Tensor]]
self[key] = [[_safe_convert_tensor(elem) for elem in elems] for elems in value]
elif isinstance(value, list):
# list[Any] -> list[Tensor]
self[key] = [_safe_convert_tensor(elem) for elem in value]
else:
# Any -> Tensor
self[key] = _safe_convert_tensor(value)
return self
def to(self, *args, **kwargs) -> "BatchFeature":
"""
Send all values to device by calling `v.to(*args, **kwargs)` (PyTorch only). This should support casting in
different `dtypes` and sending the `BatchFeature` to a different `device`.
Args:
args (`Tuple`):
Will be passed to the `to(...)` function of the tensors.
kwargs (`Dict`, *optional*):
Will be passed to the `to(...)` function of the tensors.
Returns:
[`BatchFeature`]: The same instance after modification.
"""
requires_backends(self, ["torch"])
import torch # noqa
new_data = {}
device = kwargs.get("device")
# Check if the args are a device or a dtype
if device is None and len(args) > 0:
# device should be always the first argument
arg = args[0]
if is_torch_dtype(arg):
# The first argument is a dtype
pass
elif isinstance(arg, str) or is_torch_device(arg) or isinstance(arg, int):
device = arg
else:
# it's something else
raise ValueError(f"Attempting to cast a BatchFeature to type {str(arg)}. This is not supported.")
def _to(elem):
# check if v is a floating point
if torch.is_floating_point(elem):
# cast and send to device
return elem.to(*args, **kwargs)
if device is not None:
return elem.to(device=device)
return elem
# We cast only floating point tensors to avoid issues with tokenizers casting `LongTensor` to `FloatTensor`
for k, v in self.items():
if isinstance(v, list) and isinstance(v[0], list):
# Data structure is a list of lists
new_v = []
for elems in v:
new_v.append([_to(elem) for elem in elems])
new_data[k] = new_v
elif isinstance(v, list):
# Data structure is a list
new_data[k] = [_to(elem) for elem in v]
else:
new_data[k] = _to(v)
self.data = new_data
return self
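# Illustrative sketch (hypothetical helper, not from the library): unlike the base
# `BatchFeature`, the Fuyu variant keeps nested list structure and only converts the
# innermost entries to tensors (requires torch).
def _fuyu_batch_feature_sketch():
    import torch

    features = FuyuBatchFeature(data={"image_patches": [[[1, 2], [3, 4]]]})
    features = features.convert_to_tensors(tensor_type="pt")
    assert isinstance(features["image_patches"], list)        # outer structure preserved
    assert torch.is_tensor(features["image_patches"][0][0])   # leaves are now tensors
    return features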
class FuyuImageProcessor(BaseImageProcessor):
"""
This class should handle the image processing part before the main FuyuForCausalLM. In particular, it should
handle:
- Processing Images:
Taking a batch of images as input. If the images are variable-sized, it resizes them based on the desired patch
        dimensions. The output image is always of size (img_h, img_w) = (1080, 1920).
Then, it patches up these images using the patchify_image function.
- Creating Image Input IDs:
For each patch, a placeholder ID is given to identify where these patches belong in a token sequence. For
variable-sized images, each line of patches is terminated with a newline ID.
- Image Patch Indices:
For each image patch, the code maintains an index where these patches should be inserted in a token stream.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the image to `size`.
size (`dict[str, int]`, *optional*, defaults to `{"height": 1080, "width": 1920}`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
do_pad (`bool`, *optional*, defaults to `True`):
Whether to pad the image to `size`.
padding_value (`float`, *optional*, defaults to 1.0):
The value to pad the image with.
padding_mode (`str`, *optional*, defaults to `"constant"`):
The padding mode to use when padding the image.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether to normalize the image.
image_mean (`float`, *optional*, defaults to 0.5):
The mean to use when normalizing the image.
image_std (`float`, *optional*, defaults to 0.5):
The standard deviation to use when normalizing the image.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `1 / 255`):
The factor to use when rescaling the image.
patch_size (`dict[str, int]`, *optional*, defaults to `{"height": 30, "width": 30}`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
model_input_names = [
"images",
"image_input_ids",
"image_patches",
"image_patch_indices_per_batch",
"image_patch_indices_per_subsequence",
]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_pad: bool = True,
padding_value: float = 1.0,
padding_mode: str = "constant",
do_normalize: bool = True,
image_mean: Union[float, list[float]] = 0.5,
image_std: Union[float, list[float]] = 0.5,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
patch_size: Optional[dict[str, int]] = None,
**kwargs,
):
super().__init__(**kwargs)
self.do_resize = do_resize
self.size = size if size is not None else {"height": 1080, "width": 1920}
self.resample = resample
self.do_pad = do_pad
self.padding_value = padding_value
self.padding_mode = padding_mode
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.patch_size = patch_size if patch_size is not None else {"height": 30, "width": 30}
def resize(
self,
image: np.ndarray,
size: dict[str, int],
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to resize.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the output image. If unset, the channel dimension format of the input
image is used. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
- `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
Returns:
`np.ndarray`: The resized image.
"""
image_height, image_width = get_image_size(image, input_data_format)
target_height, target_width = size["height"], size["width"]
if image_width <= target_width and image_height <= target_height:
return image
height_scale_factor = target_height / image_height
width_scale_factor = target_width / image_width
optimal_scale_factor = min(height_scale_factor, width_scale_factor)
new_height = int(image_height * optimal_scale_factor)
new_width = int(image_width * optimal_scale_factor)
scaled_image = resize(
image=image,
size=(new_height, new_width),
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
**kwargs,
)
return scaled_image
def pad_image(
self,
image: np.ndarray,
size: dict[str, int],
mode: str = "constant",
constant_values: float = 1.0,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""
Pad an image to `(size["height"], size["width"])`.
Args:
image (`np.ndarray`):
Image to pad.
size (`dict[str, int]`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
data_format (`ChannelDimension` or `str`, *optional*):
The data format of the output image. If unset, the same format as the input image is used.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
image_height, image_width = get_image_size(image, input_data_format)
target_height, target_width = size["height"], size["width"]
padding_top = 0
padding_left = 0
padding_bottom = target_height - image_height
padding_right = target_width - image_width
padded_image = pad(
image,
padding=((padding_top, padding_bottom), (padding_left, padding_right)),
mode=mode,
constant_values=constant_values,
data_format=data_format,
input_data_format=input_data_format,
)
return padded_image
@filter_out_non_signature_kwargs()
def preprocess(
self,
images,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_pad: Optional[bool] = None,
padding_value: Optional[float] = None,
padding_mode: Optional[str] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[float] = None,
image_std: Optional[float] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
patch_size: Optional[dict[str, int]] = None,
data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
return_tensors: Optional[TensorType] = None,
):
"""
Utility function to preprocess the images and extract necessary information about original formats.
Args:
images (`ImageInput`):
                Images to preprocess. Expects a single image, a list of images, or a list of lists of images. Pixel
values range from 0 to 255, or between 0 and 1 if `do_rescale` is `False`.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the image to `size`.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
`PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
do_pad (`bool`, *optional*, defaults to `self.do_pad`):
Whether to pad the image to `size`.
padding_value (`float`, *optional*, defaults to `self.padding_value`):
The value to pad the image with.
padding_mode (`str`, *optional*, defaults to `self.padding_mode`):
The padding mode to use when padding the image.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the image.
image_mean (`float`, *optional*, defaults to `self.image_mean`):
The mean to use when normalizing the image.
image_std (`float`, *optional*, defaults to `self.image_std`):
The standard deviation to use when normalizing the image.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the image.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
The factor to use when rescaling the image.
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be one of:
- Unset: Return a list of `np.ndarray`.
- `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
- `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
- `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
The channel dimension format of the output image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image. If unset, the channel dimension format is inferred
from the input image. Can be one of:
- `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
resample = resample if resample is not None else self.resample
do_pad = do_pad if do_pad is not None else self.do_pad
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
padding_value = padding_value if padding_value is not None else self.padding_value
padding_mode = padding_mode if padding_mode is not None else self.padding_mode
patch_size = patch_size if patch_size is not None else self.patch_size
if isinstance(images, list) and any(isinstance(elem, list) and len(elem) >= 2 for elem in images):
raise ValueError("Multiple images for a single sample are not yet supported.")
batch_images = make_list_of_list_of_images(images)
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_pad=do_pad,
size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.
do_resize=do_resize,
size=size,
resample=resample,
)
# All transformations expect numpy arrays.
batch_images = [[to_numpy_array(image) for image in images] for images in batch_images]
if do_rescale and is_scaled_image(batch_images[0][0]):
logger.warning_once(
"It looks like you are trying to rescale already rescaled images. If the input"
" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
)
if input_data_format is None:
# We assume that all images have the same channel dimension format.
input_data_format = infer_channel_dimension_format(batch_images[0][0])
original_image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
size = get_size_dict(size) # for BC
if do_resize:
batch_images = [
[self.resize(image, size=size, input_data_format=input_data_format) for image in images]
for images in batch_images
]
image_sizes = [get_image_size(images[0], channel_dim=input_data_format) for images in batch_images]
image_unpadded_heights = [[image_size[0]] for image_size in image_sizes]
image_unpadded_widths = [[image_size[1]] for image_size in image_sizes]
# scale_h is the same as scale_w
image_scale_factors = [
[resized_size[0] / original_size[0]]
for original_size, resized_size in zip(original_image_sizes, image_sizes)
]
if do_pad:
batch_images = [
[
self.pad_image(
image,
size=size,
mode=padding_mode,
constant_values=padding_value,
input_data_format=input_data_format,
)
for image in images
]
for images in batch_images
]
if do_rescale:
batch_images = [
[self.rescale(image, scale=rescale_factor, input_data_format=input_data_format) for image in images]
for images in batch_images
]
if do_normalize:
batch_images = [
[
self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)
for image in images
]
for images in batch_images
]
if data_format is not None:
batch_images = [
[to_channel_dimension_format(image, data_format, input_data_format) for image in images]
for images in batch_images
]
data = {
"images": batch_images,
"image_unpadded_heights": image_unpadded_heights,
"image_unpadded_widths": image_unpadded_widths,
"image_scale_factors": image_scale_factors,
}
return FuyuBatchFeature(data=data, tensor_type=return_tensors)
def get_num_patches(self, image_height: int, image_width: int, patch_size: Optional[dict[str, int]] = None) -> int:
"""
Calculate number of patches required to encode an image.
Args:
image_height (`int`):
Height of the image.
image_width (`int`):
Width of the image.
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
patch_size = patch_size if patch_size is not None else self.patch_size
        patch_height, patch_width = patch_size["height"], patch_size["width"]
if image_height % patch_height != 0:
raise ValueError(f"{image_height=} must be divisible by {patch_height}")
if image_width % patch_width != 0:
raise ValueError(f"{image_width=} must be divisible by {patch_width}")
num_patches_per_dim_h = image_height // patch_height
num_patches_per_dim_w = image_width // patch_width
num_patches = num_patches_per_dim_h * num_patches_per_dim_w
return num_patches
def patchify_image(self, image: "torch.Tensor", patch_size: Optional[dict[str, int]] = None) -> "torch.Tensor":
"""
Convert an image into a tensor of patches.
Args:
image (`torch.Tensor`):
Image to convert. Shape: [batch, channels, height, width]
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the patches.
"""
requires_backends(self, ["torch"])
patch_size = patch_size if patch_size is not None else self.patch_size
patch_height, patch_width = patch_size["height"], patch_size["width"]
# TODO refer to https://github.com/ArthurZucker/transformers/blob/0f0a3fe5ca5697ee58faeb5b53f049af720b5e98/src/transformers/models/vit_mae/modeling_vit_mae.py#L871
# torch implementation is faster but does not handle non-squares
batch_size, channels, _, _ = image.shape
unfolded_along_height = image.unfold(2, patch_height, patch_height)
patches = unfolded_along_height.unfold(3, patch_width, patch_width)
patches = patches.contiguous()
patches = patches.view(batch_size, channels, -1, patch_height, patch_width)
patches = patches.permute(0, 2, 3, 4, 1)
patches = patches.reshape(batch_size, -1, channels * patch_height * patch_width)
return patches
def preprocess_with_tokenizer_info(
self,
image_input: "torch.Tensor",
image_present: "torch.Tensor",
image_unpadded_h: "torch.Tensor",
image_unpadded_w: "torch.Tensor",
image_placeholder_id: int,
image_newline_id: int,
variable_sized: bool,
patch_size: Optional[dict[str, int]] = None,
) -> FuyuBatchFeature:
"""Process images for model input. In particular, variable-sized images are handled here.
Args:
image_input (`torch.Tensor` of shape [batch_size, subsequence_size, num_channels, height, width]):
Tensor of images padded to model input size.
image_present (`torch.Tensor` of shape [batch_size, subsequence_size, num_images]):
Tensor of 1s and 0s indicating whether an image is present.
image_unpadded_h (`torch.Tensor` of shape [batch_size, subsequence_size]):
Tensor of unpadded image heights.
image_unpadded_w (`torch.Tensor` of shape [batch_size, subsequence_size]):
Tensor of unpadded image widths.
image_placeholder_id (int):
The id of the image placeholder token. Comes from an associated tokenizer.
image_newline_id (int):
The id of the image newline token. Comes from an associated tokenizer.
variable_sized (bool):
Whether to process images as variable-sized.
patch_size (`dict[str, int]`, *optional*, defaults to `self.patch_size`):
Size of the patches.
"""
requires_backends(self, ["torch"])
patch_size = patch_size if patch_size is not None else self.patch_size
patch_height, patch_width = patch_size["height"], patch_size["width"]
# Only images that are present.
images: list[list[torch.Tensor]] = []
batch_image_patches: list[list[torch.Tensor]] = []
# Image input ids for every subsequence, including ones with no image present.
batch_image_input_ids: list[list[torch.Tensor]] = []
for batch_index in range(image_input.shape[0]):
image_input_ids = []
image_patches = []
for subseq_index in range(image_input.shape[1]):
if image_present[batch_index, subseq_index]:
image = image_input[batch_index, subseq_index]
image_height, image_width = image.shape[1], image.shape[2]
if variable_sized:
# The min() is required here due to floating point issues:
# math.ceil(torch.tensor(300).cuda() / 30) == 11
new_h = min(
image_height,
math.ceil(image_unpadded_h[batch_index, subseq_index] / patch_height) * patch_height,
)
new_w = min(
image_width,
math.ceil(image_unpadded_w[batch_index, subseq_index] / patch_width) * patch_width,
)
image = image[:, :new_h, :new_w]
image_height, image_width = new_h, new_w
num_patches = self.get_num_patches(image_height=image_height, image_width=image_width)
tensor_of_image_ids = torch.full(
[num_patches], image_placeholder_id, dtype=torch.int32, device=image_input.device
)
patches = self.patchify_image(image=image.unsqueeze(0)).squeeze(0)
assert num_patches == patches.shape[0]
if variable_sized:
# Now terminate each line with |NEWLINE|.
tensor_of_image_ids = tensor_of_image_ids.reshape(-1, image_width // patch_width)
newline_ids = torch.full(
[tensor_of_image_ids.shape[0], 1],
image_newline_id,
dtype=torch.int32,
device=image_input.device,
)
tensor_of_image_ids = torch.cat([tensor_of_image_ids, newline_ids], dim=1)
tensor_of_image_ids = tensor_of_image_ids.reshape(-1)
images.append([image])
image_input_ids.append(tensor_of_image_ids)
image_patches.append(patches)
else:
image_input_ids.append(torch.tensor([], dtype=torch.int32, device=image_input.device))
batch_image_input_ids.append(image_input_ids)
batch_image_patches.append(image_patches)
# Create image_patch_input_indices, where non-negative values correspond to image patches to be inserted in
# the stream.
image_patch_indices_per_batch: list[list[torch.Tensor]] = []
image_patch_indices_per_subsequence: list[list[torch.Tensor]] = []
for sample_image_input_ids in batch_image_input_ids:
index_offset = 0
per_batch_indices = []
per_subsequence_indices = []
for subseq_image_input_ids in sample_image_input_ids:
# Indices of image patches.
patches_mask = subseq_image_input_ids == image_placeholder_id
num_patches = torch.count_nonzero(patches_mask)
indices = torch.arange(num_patches, dtype=torch.int64, device=subseq_image_input_ids.device).type_as(
subseq_image_input_ids
)
# Place those indices in the image input ids token stream, with -1 representing non-index tokens.
indices_in_stream_per_batch = torch.full_like(subseq_image_input_ids, -1)
indices_in_stream_per_subsequence = torch.full_like(subseq_image_input_ids, -1)
patches_inds = torch.nonzero(patches_mask, as_tuple=True)[0]
indices_in_stream_per_batch[patches_inds] = indices + index_offset
indices_in_stream_per_subsequence[patches_inds] = indices
per_batch_indices.append(indices_in_stream_per_batch)
per_subsequence_indices.append(indices_in_stream_per_subsequence)
index_offset += num_patches
image_patch_indices_per_batch.append(per_batch_indices)
image_patch_indices_per_subsequence.append(per_subsequence_indices)
return FuyuBatchFeature(
data={
"images": images,
"image_input_ids": batch_image_input_ids,
"image_patches": batch_image_patches,
"image_patch_indices_per_batch": image_patch_indices_per_batch,
"image_patch_indices_per_subsequence": image_patch_indices_per_subsequence,
}
)
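# Illustrative sketch (hypothetical helper, not from the library): resizing preserves the
# aspect ratio, so a 1200x2400 image is scaled by min(1080 / 1200, 1920 / 2400) = 0.8 to
# 960x1920, and the padded 1080x1920 canvas is covered by (1080 // 30) * (1920 // 30)
# = 36 * 64 = 2304 patches of 30x30 pixels.
def _fuyu_image_processor_sketch():
    import numpy as np

    processor = FuyuImageProcessor()
    image = np.random.randint(0, 256, size=(1200, 2400, 3), dtype=np.uint8)
    resized = processor.resize(image, size=processor.size, input_data_format=ChannelDimension.LAST)
    assert resized.shape[:2] == (960, 1920)
    assert processor.get_num_patches(image_height=1080, image_width=1920) == 2304
    return resized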
__all__ = ["FuyuImageProcessor"]
| transformers/src/transformers/models/fuyu/image_processing_fuyu.py/0 | {
"file_path": "transformers/src/transformers/models/fuyu/image_processing_fuyu.py",
"repo_id": "transformers",
"token_count": 15054
} | 468 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class GitVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GitVisionModel`]. It is used to instantiate a GIT
vision encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the vision encoder of the GIT
[microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
Example:
```python
>>> from transformers import GitVisionConfig, GitVisionModel
>>> # Initializing a GitVisionConfig with microsoft/git-base style configuration
>>> configuration = GitVisionConfig()
>>> # Initializing a GitVisionModel (with random weights) from the microsoft/git-base style configuration
>>> model = GitVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "git_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
num_channels=3,
image_size=224,
patch_size=16,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
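# Illustrative sketch (hypothetical helper, not from the library): overriding a couple of
# vision defaults while the remaining fields keep the values documented above.
def _git_vision_config_sketch():
    configuration = GitVisionConfig(image_size=384, patch_size=32)
    assert configuration.image_size == 384 and configuration.patch_size == 32
    assert configuration.hidden_size == 768  # unspecified fields keep their defaults
    return configuration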
class GitConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GitModel`]. It is used to instantiate a GIT model
according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GIT
[microsoft/git-base](https://huggingface.co/microsoft/git-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`GitVisionConfig`].
vocab_size (`int`, *optional*, defaults to 30522):
Vocabulary size of the GIT model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`GitModel`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
[Self-Attention with Relative Position Representations (Shaw et al.)](https://huggingface.co/papers/1803.02155).
For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
with Better Relative Position Embeddings (Huang et al.)](https://huggingface.co/papers/2009.13658).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
num_image_with_embedding (`int`, *optional*):
The number of temporal embeddings to add, in case the model is used for video captioning/VQA.
Examples:
```python
>>> from transformers import GitConfig, GitModel
>>> # Initializing a GIT microsoft/git-base style configuration
>>> configuration = GitConfig()
>>> # Initializing a model (with random weights) from the microsoft/git-base style configuration
>>> model = GitModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "git"
sub_configs = {"vision_config": GitVisionConfig}
def __init__(
self,
vision_config=None,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=6,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=1024,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
position_embedding_type="absolute",
use_cache=True,
tie_word_embeddings=False,
bos_token_id=101,
eos_token_id=102,
num_image_with_embedding=None,
**kwargs,
):
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
if vision_config is None:
vision_config = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
self.vision_config = GitVisionConfig(**vision_config)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.tie_word_embeddings = tie_word_embeddings
self.num_image_with_embedding = num_image_with_embedding
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
__all__ = ["GitConfig", "GitVisionConfig"]
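# Illustrative usage (a minimal sketch; the sizes are arbitrary and only show how the nested
# vision configuration is consumed by `GitConfig`):
# >>> vision_config = GitVisionConfig(hidden_size=512, num_hidden_layers=6)
# >>> config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=4)
# >>> config.vision_config.hidden_size
# 512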
| transformers/src/transformers/models/git/configuration_git.py/0 | {
"file_path": "transformers/src/transformers/models/git/configuration_git.py",
"repo_id": "transformers",
"token_count": 3855
} | 469 |
# coding=utf-8
# Copyright 2022 KAIST and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GLPN model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class GLPNConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GLPNModel`]. It is used to instantiate a GLPN
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the GLPN
[vinvino02/glpn-kitti](https://huggingface.co/vinvino02/glpn-kitti) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
num_encoder_blocks (`int`, *optional*, defaults to 4):
The number of encoder blocks (i.e. stages in the Mix Transformer encoder).
depths (`list[int]`, *optional*, defaults to `[2, 2, 2, 2]`):
The number of layers in each encoder block.
sr_ratios (`list[int]`, *optional*, defaults to `[8, 4, 2, 1]`):
Sequence reduction ratios in each encoder block.
hidden_sizes (`list[int]`, *optional*, defaults to `[32, 64, 160, 256]`):
Dimension of each of the encoder blocks.
patch_sizes (`list[int]`, *optional*, defaults to `[7, 3, 3, 3]`):
Patch size before each encoder block.
strides (`list[int]`, *optional*, defaults to `[4, 2, 2, 2]`):
Stride before each encoder block.
num_attention_heads (`list[int]`, *optional*, defaults to `[1, 2, 5, 8]`):
Number of attention heads for each attention layer in each block of the Transformer encoder.
mlp_ratios (`list[int]`, *optional*, defaults to `[4, 4, 4, 4]`):
Ratio of the size of the hidden layer compared to the size of the input layer of the Mix FFNs in the
encoder blocks.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
drop_path_rate (`float`, *optional*, defaults to 0.1):
The dropout probability for stochastic depth, used in the blocks of the Transformer encoder.
layer_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the layer normalization layers.
decoder_hidden_size (`int`, *optional*, defaults to 64):
The dimension of the decoder.
max_depth (`int`, *optional*, defaults to 10):
The maximum depth of the decoder.
head_in_index (`int`, *optional*, defaults to -1):
The index of the features to use in the head.
Example:
```python
>>> from transformers import GLPNModel, GLPNConfig
>>> # Initializing a GLPN vinvino02/glpn-kitti style configuration
>>> configuration = GLPNConfig()
>>> # Initializing a model from the vinvino02/glpn-kitti style configuration
>>> model = GLPNModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "glpn"
def __init__(
self,
num_channels=3,
num_encoder_blocks=4,
depths=[2, 2, 2, 2],
sr_ratios=[8, 4, 2, 1],
hidden_sizes=[32, 64, 160, 256],
patch_sizes=[7, 3, 3, 3],
strides=[4, 2, 2, 2],
num_attention_heads=[1, 2, 5, 8],
mlp_ratios=[4, 4, 4, 4],
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
drop_path_rate=0.1,
layer_norm_eps=1e-6,
decoder_hidden_size=64,
max_depth=10,
head_in_index=-1,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.num_encoder_blocks = num_encoder_blocks
self.depths = depths
self.sr_ratios = sr_ratios
self.hidden_sizes = hidden_sizes
self.patch_sizes = patch_sizes
self.strides = strides
self.mlp_ratios = mlp_ratios
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.drop_path_rate = drop_path_rate
self.layer_norm_eps = layer_norm_eps
self.decoder_hidden_size = decoder_hidden_size
self.max_depth = max_depth
self.head_in_index = head_in_index
__all__ = ["GLPNConfig"]
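# Illustrative usage (a minimal sketch; the per-stage values are arbitrary but keep one entry per
# encoder block and stay divisible by the matching `num_attention_heads`):
# >>> config = GLPNConfig(hidden_sizes=[16, 32, 80, 128], decoder_hidden_size=32)
# >>> model = GLPNModel(config)  # randomly initialized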
| transformers/src/transformers/models/glpn/configuration_glpn.py/0 | {
"file_path": "transformers/src/transformers/models/glpn/configuration_glpn.py",
"repo_id": "transformers",
"token_count": 2355
} | 470 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config.from_json_file(gpt2_config_file)
model = GPT2Model(config)
# Load weights from the TensorFlow checkpoint
load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}")
torch.save(model.state_dict(), pytorch_weights_dump_path)
print(f"Save configuration file to {pytorch_config_dump_path}")
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--gpt2_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
args = parser.parse_args()
convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
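# Example invocation (illustrative paths; `--gpt2_config_file` is optional and defaults to a stock `GPT2Config`):
#   python convert_gpt2_original_tf_checkpoint_to_pytorch.py \
#       --gpt2_checkpoint_path /path/to/tf_checkpoint \
#       --gpt2_config_file /path/to/config.json \
#       --pytorch_dump_folder_path /path/to/output_dir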
| transformers/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/gpt2/convert_gpt2_original_tf_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 937
} | 471 |
# coding=utf-8
# Copyright 2022 EleutherAI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPTNeoX model configuration"""
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
from ...utils import logging
logger = logging.get_logger(__name__)
class GPTNeoXConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate a
GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the GPTNeoX
[EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50432):
Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`GPTNeoXModel`].
hidden_size (`int`, *optional*, defaults to 6144):
Dimension of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 44):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 64):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 24576):
Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
rotary_pct (`float`, *optional*, defaults to 0.25):
Percentage of hidden dimensions to allocate to rotary embeddings.
rotary_emb_base (`int`, *optional*, defaults to 10000):
Base for computing the rotary embedding frequencies.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability of the attention scores.
hidden_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio of (1) the word embeddings, (2) the post-attention hidden states, and (3) the post-mlp
hidden states.
classifier_dropout (`float`, *optional*, defaults to 0.1):
Argument used when doing token classification, used in the model [`GPTNeoXForTokenClassification`].
The dropout ratio for the hidden layer.
max_position_embeddings (`int`, *optional*, defaults to 2048):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
use_parallel_residual (`bool`, *optional*, defaults to `True`):
Whether to use a "parallel" formulation in each Transformer layer, which can provide a slight training
speedup at large scales (e.g. 20B).
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
attention_bias (`bool`, *optional*, defaults to `True`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
Example:
```python
>>> from transformers import GPTNeoXConfig, GPTNeoXModel
>>> # Initializing a GPTNeoX gpt-neox-20b style configuration
>>> configuration = GPTNeoXConfig()
>>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration
>>> model = GPTNeoXModel(configuration) # doctest: +SKIP
>>> # Accessing the model configuration
>>> configuration = model.config # doctest: +SKIP
```"""
model_type = "gpt_neox"
keys_to_ignore_at_inference = ["past_key_values"]
base_model_tp_plan = {
"layers.*.attention.query_key_value": "colwise",
"layers.*.attention.dense": "rowwise",
"layers.*.mlp.dense_h_to_4h": "colwise",
"layers.*.mlp.dense_4h_to_h": "rowwise",
}
base_model_pp_plan = {
"embed_in": (["input_ids"], ["inputs_embeds"]),
"emb_dropout": (["inputs_embeds"], ["hidden_states"]),
"layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
"final_layer_norm": (["hidden_states"], ["hidden_states"]),
}
def __init__(
self,
vocab_size=50432,
hidden_size=6144,
num_hidden_layers=44,
num_attention_heads=64,
intermediate_size=24576,
hidden_act="gelu",
rotary_pct=0.25,
rotary_emb_base=10000,
attention_dropout=0.0,
hidden_dropout=0.0,
classifier_dropout=0.1,
max_position_embeddings=2048,
initializer_range=0.02,
layer_norm_eps=1e-5,
use_cache=True,
bos_token_id=0,
eos_token_id=2,
tie_word_embeddings=False,
use_parallel_residual=True,
rope_scaling=None,
attention_bias=True,
**kwargs,
):
super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.rotary_pct = rotary_pct
self.partial_rotary_factor = rotary_pct
self.rotary_emb_base = rotary_emb_base
self.rope_theta = rotary_emb_base
self.attention_dropout = attention_dropout
self.hidden_dropout = hidden_dropout
self.classifier_dropout = classifier_dropout
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.use_cache = use_cache
self.tie_word_embeddings = tie_word_embeddings
self.use_parallel_residual = use_parallel_residual
self.rope_scaling = rope_scaling
self.attention_bias = attention_bias
# Validate the correctness of rotary position embeddings parameters
# BC: if there is a 'type' field, move it to 'rope_type'.
if self.rope_scaling is not None and "type" in self.rope_scaling:
self.rope_scaling["rope_type"] = self.rope_scaling["type"]
rope_config_validation(self)
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"The hidden size is not divisible by the number of attention heads! Make sure to update them!"
)
__all__ = ["GPTNeoXConfig"]
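# Illustrative usage (a minimal sketch; the scaling factor is arbitrary): enabling linear RoPE
# scaling via `rope_scaling`, which is checked by `rope_config_validation` in `__init__`.
# >>> config = GPTNeoXConfig(rope_scaling={"rope_type": "linear", "factor": 2.0})
# >>> config.rope_theta == config.rotary_emb_base
# True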
| transformers/src/transformers/models/gpt_neox/configuration_gpt_neox.py/0 | {
"file_path": "transformers/src/transformers/models/gpt_neox/configuration_gpt_neox.py",
"repo_id": "transformers",
"token_count": 4391
} | 472 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Grounding DINO.
"""
import pathlib
import warnings
from typing import TYPE_CHECKING, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_transforms import center_to_corners_format
from ...image_utils import AnnotationFormat, ImageInput
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import BatchEncoding, PreTokenizedInput, TextInput
from ...utils import TensorType, is_torch_available
if is_torch_available():
import torch
if TYPE_CHECKING:
from .modeling_grounding_dino import GroundingDinoObjectDetectionOutput
AnnotationType = dict[str, Union[int, str, list[dict]]]
def get_phrases_from_posmap(posmaps, input_ids):
"""Get token ids of phrases from posmaps and input_ids.
Args:
posmaps (`torch.BoolTensor` of shape `(num_boxes, hidden_size)`):
A boolean tensor of text-thresholded logits related to the detected bounding boxes.
input_ids (`torch.LongTensor` of shape `(sequence_length,)`):
A tensor of token ids.
"""
left_idx = 0
right_idx = posmaps.shape[-1] - 1
# Avoiding altering the input tensor
posmaps = posmaps.clone()
posmaps[:, 0 : left_idx + 1] = False
posmaps[:, right_idx:] = False
token_ids = []
for posmap in posmaps:
non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
token_ids.append([input_ids[i] for i in non_zero_idx])
return token_ids
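# Illustrative behaviour (a toy sketch; the token ids are arbitrary): the first and last positions
# are masked out before the token ids covered by each box are collected.
# >>> posmaps = torch.tensor([[False, True, True, False]])
# >>> input_ids = torch.tensor([101, 1037, 4937, 102])
# >>> get_phrases_from_posmap(posmaps, input_ids)
# [[tensor(1037), tensor(4937)]]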
def _is_list_of_candidate_labels(text) -> bool:
"""Check that text is list/tuple of strings and each string is a candidate label and not merged candidate labels text.
Merged candidate labels text is a string with candidate labels separated by a dot.
"""
if isinstance(text, (list, tuple)):
return all(isinstance(t, str) and "." not in t for t in text)
return False
def _merge_candidate_labels_text(text: list[str]) -> str:
"""
Merge candidate labels text into a single string. Ensure all labels are lowercase.
For example, ["A cat", "a dog"] -> "a cat. a dog."
"""
labels = [t.strip().lower() for t in text] # ensure lowercase
merged_labels_str = ". ".join(labels) + "." # join with dot and add a dot at the end
return merged_labels_str
class DictWithDeprecationWarning(dict):
message = (
"The key `labels` will return integer ids in `GroundingDinoProcessor.post_process_grounded_object_detection` "
"output since v4.51.0. Use `text_labels` instead to retrieve string object names."
)
def __getitem__(self, key):
if key == "labels":
warnings.warn(self.message, FutureWarning)
return super().__getitem__(key)
def get(self, key, *args, **kwargs):
if key == "labels":
warnings.warn(self.message, FutureWarning)
return super().get(key, *args, **kwargs)
class GroundingDinoImagesKwargs(ImagesKwargs, total=False):
annotations: Optional[Union[AnnotationType, list[AnnotationType]]]
return_segmentation_masks: Optional[bool]
masks_path: Optional[Union[str, pathlib.Path]]
do_convert_annotations: Optional[bool]
format: Optional[Union[str, AnnotationFormat]]
class GroundingDinoProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: GroundingDinoImagesKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"stride": 0,
"return_overflowing_tokens": False,
"return_special_tokens_mask": False,
"return_offsets_mapping": False,
"return_token_type_ids": True,
"return_length": False,
"verbose": True,
}
}
class GroundingDinoProcessor(ProcessorMixin):
r"""
Constructs a Grounding DINO processor which wraps a Deformable DETR image processor and a BERT tokenizer into a
single processor.
[`GroundingDinoProcessor`] offers all the functionalities of [`GroundingDinoImageProcessor`] and
[`AutoTokenizer`]. See the docstring of [`~GroundingDinoProcessor.__call__`] and [`~GroundingDinoProcessor.decode`]
for more information.
Args:
image_processor (`GroundingDinoImageProcessor`):
An instance of [`GroundingDinoImageProcessor`]. The image processor is a required input.
tokenizer (`AutoTokenizer`):
An instance of [`PreTrainedTokenizer`]. The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "GroundingDinoImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
audio=None,
videos=None,
**kwargs: Unpack[GroundingDinoProcessorKwargs],
) -> BatchEncoding:
"""
This method uses [`GroundingDinoImageProcessor.__call__`] method to prepare image(s) for the model, and
[`BertTokenizerFast.__call__`] to prepare text for the model.
Args:
images (`ImageInput`, `list[ImageInput]`, *optional*):
The image or batch of images to be processed. Each image may be a PIL image, a NumPy array or a PyTorch tensor.
text (`TextInput`, `PreTokenizedInput`, `list[TextInput]`, `list[PreTokenizedInput]`, *optional*):
Candidate labels to be detected on the image. The text might be one of the following:
- A list of candidate labels (strings) to be detected on the image (e.g. ["a cat", "a dog"]).
- A batch of candidate labels to be detected on the batch of images (e.g. [["a cat", "a dog"], ["a car", "a person"]]).
- A merged candidate labels string to be detected on the image, separated by "." (e.g. "a cat. a dog.").
- A batch of merged candidate labels text to be detected on the batch of images (e.g. ["a cat. a dog.", "a car. a person."]).
"""
if images is None and text is None:
raise ValueError("You must specify either text or images.")
output_kwargs = self._merge_kwargs(
GroundingDinoProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
# Get only text
if images is not None:
encoding_image_processor = self.image_processor(images, **output_kwargs["images_kwargs"])
else:
encoding_image_processor = BatchFeature()
if text is not None:
text = self._preprocess_input_text(text)
text_encoding = self.tokenizer(
text=text,
**output_kwargs["text_kwargs"],
)
else:
text_encoding = BatchEncoding()
text_encoding.update(encoding_image_processor)
return text_encoding
def _preprocess_input_text(self, text):
"""
Preprocess input text to ensure that labels are in the correct format for the model.
If the text is a list of candidate labels, merge the candidate labels into a single string,
for example, ["a cat", "a dog"] -> "a cat. a dog.". In case candidate labels are already in a form of
"a cat. a dog.", the text is returned as is.
"""
if _is_list_of_candidate_labels(text):
text = _merge_candidate_labels_text(text)
# for batched input
elif isinstance(text, (list, tuple)) and all(_is_list_of_candidate_labels(t) for t in text):
text = [_merge_candidate_labels_text(sample) for sample in text]
return text
def post_process_grounded_object_detection(
self,
outputs: "GroundingDinoObjectDetectionOutput",
input_ids: Optional[TensorType] = None,
threshold: float = 0.25,
text_threshold: float = 0.25,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
text_labels: Optional[list[list[str]]] = None,
):
"""
Converts the raw output of [`GroundingDinoForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format and get the associated text label.
Args:
outputs ([`GroundingDinoObjectDetectionOutput`]):
Raw outputs of the model.
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
The token ids of the input text. If not provided will be taken from the model output.
threshold (`float`, *optional*, defaults to 0.25):
Threshold to keep object detection predictions based on confidence score.
text_threshold (`float`, *optional*, defaults to 0.25):
Score threshold to keep text detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
text_labels (`list[list[str]]`, *optional*):
List of candidate labels to be detected on each image. At the moment it's *NOT used*, but required
to be in signature for the zero-shot object detection pipeline. Text labels are instead extracted
from the `input_ids` tensor provided in `outputs`.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the
- **scores**: tensor of confidence scores for detected objects
- **boxes**: tensor of bounding boxes in [x0, y0, x1, y1] format
- **labels**: list of text labels for each detected object (will be replaced with integer ids in v4.51.0)
- **text_labels**: list of text labels for detected objects
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
input_ids = input_ids if input_ids is not None else outputs.input_ids
if target_sizes is not None and len(target_sizes) != len(batch_logits):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
batch_probs = torch.sigmoid(batch_logits) # (batch_size, num_queries, 256)
batch_scores = torch.max(batch_probs, dim=-1)[0] # (batch_size, num_queries)
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
if isinstance(target_sizes, list):
img_h = torch.Tensor([i[0] for i in target_sizes])
img_w = torch.Tensor([i[1] for i in target_sizes])
else:
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(batch_boxes.device)
batch_boxes = batch_boxes * scale_fct[:, None, :]
results = []
for idx, (scores, boxes, probs) in enumerate(zip(batch_scores, batch_boxes, batch_probs)):
keep = scores > threshold
scores = scores[keep]
boxes = boxes[keep]
# extract text labels
prob = probs[keep]
label_ids = get_phrases_from_posmap(prob > text_threshold, input_ids[idx])
objects_text_labels = self.batch_decode(label_ids)
result = DictWithDeprecationWarning(
{
"scores": scores,
"boxes": boxes,
"text_labels": objects_text_labels,
# TODO: @pavel, set labels to None since v4.51.0 or find a way to extract ids
"labels": objects_text_labels,
}
)
results.append(result)
return results
__all__ = ["GroundingDinoProcessor"]
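# End-to-end sketch (illustrative; the checkpoint name and thresholds are assumptions, and `image`
# is expected to be a PIL image loaded beforehand):
# >>> from transformers import AutoProcessor, GroundingDinoForObjectDetection
# >>> processor = AutoProcessor.from_pretrained("IDEA-Research/grounding-dino-tiny")
# >>> model = GroundingDinoForObjectDetection.from_pretrained("IDEA-Research/grounding-dino-tiny")
# >>> inputs = processor(images=image, text=["a cat", "a dog"], return_tensors="pt")
# >>> outputs = model(**inputs)
# >>> results = processor.post_process_grounded_object_detection(
# ...     outputs, inputs.input_ids, threshold=0.4, text_threshold=0.3, target_sizes=[image.size[::-1]]
# ... )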
| transformers/src/transformers/models/grounding_dino/processing_grounding_dino.py/0 | {
"file_path": "transformers/src/transformers/models/grounding_dino/processing_grounding_dino.py",
"repo_id": "transformers",
"token_count": 5200
} | 473 |
# coding=utf-8
# Copyright 2025 Baidu Inc and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...configuration_utils import PretrainedConfig
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
auto_docstring,
)
from ...utils.backbone_utils import BackboneConfigMixin, BackboneMixin, get_aligned_output_features_output_indices
from ..rt_detr.modeling_rt_detr_resnet import RTDetrResNetConvLayer
# TODO: Modular conversion for resnet must be fixed as
# it provides incorrect import for configuration like resnet_resnet
class HGNetV2Config(BackboneConfigMixin, PretrainedConfig):
"""
This is the configuration class to store the configuration of a [`HGNetV2Backbone`]. It is used to instantiate a HGNet-V2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of D-FINE-X-COCO B4 [ustc-community/dfine_x_coco](https://huggingface.co/ustc-community/dfine_x_coco).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
embedding_size (`int`, *optional*, defaults to 64):
Dimensionality (hidden size) for the embedding layer.
depths (`list[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
Depth (number of layers) for each stage.
hidden_sizes (`list[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
Dimensionality (hidden size) at each stage.
hidden_act (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
are supported.
out_features (`list[str]`, *optional*):
If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
(depending on how many stages the model has). If unset and `out_indices` is set, will default to the
corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the
same order as defined in the `stage_names` attribute.
stem_channels (`list[int]`, *optional*, defaults to `[3, 32, 48]`):
Channel dimensions for the stem layers:
- First number (3) is input image channels
- Second number (32) is intermediate stem channels
- Third number (48) is output stem channels
stage_in_channels (`list[int]`, *optional*, defaults to `[48, 128, 512, 1024]`):
Input channel dimensions for each stage of the backbone.
This defines how many channels the input to each stage will have.
stage_mid_channels (`list[int]`, *optional*, defaults to `[48, 96, 192, 384]`):
Mid-channel dimensions for each stage of the backbone.
This defines the number of channels used in the intermediate layers of each stage.
stage_out_channels (`list[int]`, *optional*, defaults to `[128, 512, 1024, 2048]`):
Output channel dimensions for each stage of the backbone.
This defines how many channels the output of each stage will have.
stage_num_blocks (`list[int]`, *optional*, defaults to `[1, 1, 3, 1]`):
Number of blocks to be used in each stage of the backbone.
This controls the depth of each stage by specifying how many convolutional blocks to stack.
stage_downsample (`list[bool]`, *optional*, defaults to `[False, True, True, True]`):
Indicates whether to downsample the feature maps at each stage.
If `True`, the spatial dimensions of the feature maps will be reduced.
stage_light_block (`list[bool]`, *optional*, defaults to `[False, False, True, True]`):
Indicates whether to use light blocks in each stage.
Light blocks are a variant of convolutional blocks that may have fewer parameters.
stage_kernel_size (`list[int]`, *optional*, defaults to `[3, 3, 5, 5]`):
Kernel sizes for the convolutional layers in each stage.
stage_numb_of_layers (`list[int]`, *optional*, defaults to `[6, 6, 6, 6]`):
Number of layers to be used in each block of the stage.
use_learnable_affine_block (`bool`, *optional*, defaults to `False`):
Whether to use Learnable Affine Blocks (LAB) in the network.
LAB adds learnable scale and bias parameters after certain operations.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
"""
model_type = "hgnet_v2"
def __init__(
self,
num_channels=3,
embedding_size=64,
depths=[3, 4, 6, 3],
hidden_sizes=[256, 512, 1024, 2048],
hidden_act="relu",
out_features=None,
out_indices=None,
stem_channels=[3, 32, 48],
stage_in_channels=[48, 128, 512, 1024],
stage_mid_channels=[48, 96, 192, 384],
stage_out_channels=[128, 512, 1024, 2048],
stage_num_blocks=[1, 1, 3, 1],
stage_downsample=[False, True, True, True],
stage_light_block=[False, False, True, True],
stage_kernel_size=[3, 3, 5, 5],
stage_numb_of_layers=[6, 6, 6, 6],
use_learnable_affine_block=False,
initializer_range=0.02,
**kwargs,
):
super().__init__(**kwargs)
self.num_channels = num_channels
self.embedding_size = embedding_size
self.depths = depths
self.hidden_sizes = hidden_sizes
self.hidden_act = hidden_act
self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
self._out_features, self._out_indices = get_aligned_output_features_output_indices(
out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
)
self.stem_channels = stem_channels
self.stage_in_channels = stage_in_channels
self.stage_mid_channels = stage_mid_channels
self.stage_out_channels = stage_out_channels
self.stage_num_blocks = stage_num_blocks
self.stage_downsample = stage_downsample
self.stage_light_block = stage_light_block
self.stage_kernel_size = stage_kernel_size
self.stage_numb_of_layers = stage_numb_of_layers
self.use_learnable_affine_block = use_learnable_affine_block
self.initializer_range = initializer_range
if not (
len(stage_in_channels)
== len(stage_mid_channels)
== len(stage_out_channels)
== len(stage_num_blocks)
== len(stage_downsample)
== len(stage_light_block)
== len(stage_kernel_size)
== len(stage_numb_of_layers)
):
raise ValueError("All stage configuration lists must have the same length.")
# General docstring
@auto_docstring
class HGNetV2PreTrainedModel(PreTrainedModel):
config: HGNetV2Config
base_model_prefix = "hgnetv2"
main_input_name = "pixel_values"
_no_split_modules = ["HGNetV2BasicLayer"]
class HGNetV2LearnableAffineBlock(nn.Module):
def __init__(self, scale_value: float = 1.0, bias_value: float = 0.0):
super().__init__()
self.scale = nn.Parameter(torch.tensor([scale_value]), requires_grad=True)
self.bias = nn.Parameter(torch.tensor([bias_value]), requires_grad=True)
def forward(self, hidden_state: Tensor) -> Tensor:
hidden_state = self.scale * hidden_state + self.bias
return hidden_state
class HGNetV2ConvLayer(RTDetrResNetConvLayer):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: int,
stride: int = 1,
groups: int = 1,
activation: str = "relu",
use_learnable_affine_block: bool = False,
):
super().__init__(in_channels, out_channels, kernel_size, stride, activation)
self.convolution = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
groups=groups,
padding=(kernel_size - 1) // 2,
bias=False,
)
if activation and use_learnable_affine_block:
self.lab = HGNetV2LearnableAffineBlock()
else:
self.lab = nn.Identity()
def forward(self, input: Tensor) -> Tensor:
hidden_state = self.convolution(input)
hidden_state = self.normalization(hidden_state)
hidden_state = self.activation(hidden_state)
hidden_state = self.lab(hidden_state)
return hidden_state
class HGNetV2ConvLayerLight(nn.Module):
def __init__(
self, in_channels: int, out_channels: int, kernel_size: int, use_learnable_affine_block: bool = False
):
super().__init__()
self.conv1 = HGNetV2ConvLayer(
in_channels,
out_channels,
kernel_size=1,
activation=None,
use_learnable_affine_block=use_learnable_affine_block,
)
self.conv2 = HGNetV2ConvLayer(
out_channels,
out_channels,
kernel_size=kernel_size,
groups=out_channels,
use_learnable_affine_block=use_learnable_affine_block,
)
def forward(self, hidden_state: Tensor) -> Tensor:
hidden_state = self.conv1(hidden_state)
hidden_state = self.conv2(hidden_state)
return hidden_state
class HGNetV2Embeddings(nn.Module):
def __init__(self, config: HGNetV2Config):
super().__init__()
self.stem1 = HGNetV2ConvLayer(
config.stem_channels[0],
config.stem_channels[1],
kernel_size=3,
stride=2,
activation=config.hidden_act,
use_learnable_affine_block=config.use_learnable_affine_block,
)
self.stem2a = HGNetV2ConvLayer(
config.stem_channels[1],
config.stem_channels[1] // 2,
kernel_size=2,
stride=1,
activation=config.hidden_act,
use_learnable_affine_block=config.use_learnable_affine_block,
)
self.stem2b = HGNetV2ConvLayer(
config.stem_channels[1] // 2,
config.stem_channels[1],
kernel_size=2,
stride=1,
activation=config.hidden_act,
use_learnable_affine_block=config.use_learnable_affine_block,
)
self.stem3 = HGNetV2ConvLayer(
config.stem_channels[1] * 2,
config.stem_channels[1],
kernel_size=3,
stride=2,
activation=config.hidden_act,
use_learnable_affine_block=config.use_learnable_affine_block,
)
self.stem4 = HGNetV2ConvLayer(
config.stem_channels[1],
config.stem_channels[2],
kernel_size=1,
stride=1,
activation=config.hidden_act,
use_learnable_affine_block=config.use_learnable_affine_block,
)
self.pool = nn.MaxPool2d(kernel_size=2, stride=1, ceil_mode=True)
self.num_channels = config.num_channels
def forward(self, pixel_values: Tensor) -> Tensor:
num_channels = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embedding = self.stem1(pixel_values)
embedding = F.pad(embedding, (0, 1, 0, 1))
emb_stem_2a = self.stem2a(embedding)
emb_stem_2a = F.pad(emb_stem_2a, (0, 1, 0, 1))
emb_stem_2a = self.stem2b(emb_stem_2a)
pooled_emb = self.pool(embedding)
embedding = torch.cat([pooled_emb, emb_stem_2a], dim=1)
embedding = self.stem3(embedding)
embedding = self.stem4(embedding)
return embedding
class HGNetV2BasicLayer(nn.Module):
def __init__(
self,
in_channels: int,
middle_channels: int,
out_channels: int,
layer_num: int,
kernel_size: int = 3,
residual: bool = False,
light_block: bool = False,
drop_path: float = 0.0,
use_learnable_affine_block: bool = False,
):
super().__init__()
self.residual = residual
self.layers = nn.ModuleList()
for i in range(layer_num):
temp_in_channels = in_channels if i == 0 else middle_channels
if light_block:
block = HGNetV2ConvLayerLight(
in_channels=temp_in_channels,
out_channels=middle_channels,
kernel_size=kernel_size,
use_learnable_affine_block=use_learnable_affine_block,
)
else:
block = HGNetV2ConvLayer(
in_channels=temp_in_channels,
out_channels=middle_channels,
kernel_size=kernel_size,
use_learnable_affine_block=use_learnable_affine_block,
stride=1,
)
self.layers.append(block)
# feature aggregation
total_channels = in_channels + layer_num * middle_channels
aggregation_squeeze_conv = HGNetV2ConvLayer(
total_channels,
out_channels // 2,
kernel_size=1,
stride=1,
use_learnable_affine_block=use_learnable_affine_block,
)
aggregation_excitation_conv = HGNetV2ConvLayer(
out_channels // 2,
out_channels,
kernel_size=1,
stride=1,
use_learnable_affine_block=use_learnable_affine_block,
)
self.aggregation = nn.Sequential(
aggregation_squeeze_conv,
aggregation_excitation_conv,
)
self.drop_path = nn.Dropout(drop_path) if drop_path else nn.Identity()
def forward(self, hidden_state: Tensor) -> Tensor:
identity = hidden_state
output = [hidden_state]
for layer in self.layers:
hidden_state = layer(hidden_state)
output.append(hidden_state)
hidden_state = torch.cat(output, dim=1)
hidden_state = self.aggregation(hidden_state)
if self.residual:
hidden_state = self.drop_path(hidden_state) + identity
return hidden_state
class HGNetV2Stage(nn.Module):
def __init__(self, config: HGNetV2Config, stage_index: int, drop_path: float = 0.0):
super().__init__()
in_channels = config.stage_in_channels[stage_index]
mid_channels = config.stage_mid_channels[stage_index]
out_channels = config.stage_out_channels[stage_index]
num_blocks = config.stage_num_blocks[stage_index]
num_layers = config.stage_numb_of_layers[stage_index]
downsample = config.stage_downsample[stage_index]
light_block = config.stage_light_block[stage_index]
kernel_size = config.stage_kernel_size[stage_index]
use_learnable_affine_block = config.use_learnable_affine_block
if downsample:
self.downsample = HGNetV2ConvLayer(
in_channels, in_channels, kernel_size=3, stride=2, groups=in_channels, activation=None
)
else:
self.downsample = nn.Identity()
blocks_list = []
for i in range(num_blocks):
blocks_list.append(
HGNetV2BasicLayer(
in_channels if i == 0 else out_channels,
mid_channels,
out_channels,
num_layers,
residual=(i != 0),
kernel_size=kernel_size,
light_block=light_block,
drop_path=drop_path,
use_learnable_affine_block=use_learnable_affine_block,
)
)
self.blocks = nn.ModuleList(blocks_list)
def forward(self, hidden_state: Tensor) -> Tensor:
hidden_state = self.downsample(hidden_state)
for block in self.blocks:
hidden_state = block(hidden_state)
return hidden_state
class HGNetV2Encoder(nn.Module):
def __init__(self, config: HGNetV2Config):
super().__init__()
self.stages = nn.ModuleList([])
for stage_index in range(len(config.stage_in_channels)):
resnet_stage = HGNetV2Stage(config, stage_index)
self.stages.append(resnet_stage)
def forward(
self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
) -> BaseModelOutputWithNoAttention:
hidden_states = () if output_hidden_states else None
for stage in self.stages:
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
hidden_state = stage(hidden_state)
if output_hidden_states:
hidden_states = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None)
return BaseModelOutputWithNoAttention(
last_hidden_state=hidden_state,
hidden_states=hidden_states,
)
class HGNetV2Backbone(HGNetV2PreTrainedModel, BackboneMixin):
def __init__(self, config: HGNetV2Config):
super().__init__(config)
super()._init_backbone(config)
self.depths = config.depths
self.num_features = [config.embedding_size] + config.hidden_sizes
self.embedder = HGNetV2Embeddings(config)
self.encoder = HGNetV2Encoder(config)
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
) -> BackboneOutput:
r"""
Examples:
```python
>>> from transformers import HGNetV2Config, HGNetV2Backbone
>>> import torch
>>> config = HGNetV2Config()
>>> model = HGNetV2Backbone(config)
>>> pixel_values = torch.randn(1, 3, 224, 224)
>>> with torch.no_grad():
... outputs = model(pixel_values)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 2048, 7, 7]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
hidden_states = outputs.hidden_states
feature_maps = ()
for idx, stage in enumerate(self.stage_names):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
output = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=feature_maps,
hidden_states=outputs.hidden_states if output_hidden_states else None,
attentions=None,
)
@auto_docstring(
custom_intro="""
HGNetV2 Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
class HGNetV2ForImageClassification(HGNetV2PreTrainedModel):
def __init__(self, config: HGNetV2Config):
super().__init__(config)
self.num_labels = config.num_labels
self.embedder = HGNetV2Embeddings(config)
self.encoder = HGNetV2Encoder(config)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.flatten = nn.Flatten()
self.fc = nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
# classification head
self.classifier = nn.ModuleList([self.avg_pool, self.flatten])
# initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> ImageClassifierOutputWithNoAttention:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> import torch
>>> import requests
>>> from transformers import HGNetV2ForImageClassification, AutoImageProcessor
>>> from PIL import Image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> model = HGNetV2ForImageClassification.from_pretrained("ustc-community/hgnet-v2")
>>> processor = AutoImageProcessor.from_pretrained("ustc-community/hgnet-v2")
>>> inputs = processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> outputs.logits.shape
torch.Size([1, 2])
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output = self.embedder(pixel_values)
outputs = self.encoder(embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
last_hidden_state = outputs[0]
for layer in self.classifier:
last_hidden_state = layer(last_hidden_state)
logits = self.fc(last_hidden_state)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
__all__ = ["HGNetV2Config", "HGNetV2Backbone", "HGNetV2PreTrainedModel", "HGNetV2ForImageClassification"]
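# Illustrative backbone usage (a minimal sketch with random weights; the `out_features` names follow
# the `stage_names` convention documented in `HGNetV2Config`):
# >>> config = HGNetV2Config(out_features=["stage2", "stage4"])
# >>> backbone = HGNetV2Backbone(config)
# >>> feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
# >>> len(feature_maps)
# 2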
| transformers/src/transformers/models/hgnet_v2/modular_hgnet_v2.py/0 | {
"file_path": "transformers/src/transformers/models/hgnet_v2/modular_hgnet_v2.py",
"repo_id": "transformers",
"token_count": 11276
} | 474 |
# coding=utf-8
# Copyright (C) 2025 THL A29 Limited, a Tencent company and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch HunYuanDenseV1 model."""
from typing import Callable, Optional
import torch
import torch.utils.checkpoint
from torch import nn
from transformers.cache_utils import Cache
from transformers.utils import (
logging,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import TransformersKwargs
from ..llama.modeling_llama import (
LlamaAttention,
LlamaDecoderLayer,
LlamaForCausalLM,
LlamaForSequenceClassification,
LlamaMLP,
LlamaModel,
LlamaPreTrainedModel,
LlamaRMSNorm,
apply_rotary_pos_emb,
eager_attention_forward,
)
from .configuration_hunyuan_v1_dense import HunYuanDenseV1Config
logger = logging.get_logger(__name__)
class HunYuanDenseV1RMSNorm(LlamaRMSNorm):
pass
class HunYuanDenseV1MLP(LlamaMLP):
def __init__(self, config: HunYuanDenseV1Config, layer_idx=None, is_shared_mlp=False):
super().__init__(config)
self.layer_idx = layer_idx
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
class HunYuanDenseV1Attention(LlamaAttention):
def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
super().__init__(config, layer_idx)
self.query_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
self.key_layernorm = HunYuanDenseV1RMSNorm(self.head_dim, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
query_states = self.query_layernorm(query_states)
key_states = self.key_layernorm(key_states)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class HunYuanDenseV1DecoderLayer(LlamaDecoderLayer):
def __init__(self, config: HunYuanDenseV1Config, layer_idx: int):
        super().__init__(config, layer_idx)
self.layer_idx = layer_idx
class HunYuanDenseV1PreTrainedModel(LlamaPreTrainedModel):
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
class HunYuanDenseV1RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: HunYuanDenseV1Config, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
if self.rope_type == "dynamic" and config.rope_scaling["alpha"]:
# DynamicNTKAlphaRotary
self.dim = config.head_dim
base = config.rope_theta * config.rope_scaling.get("alpha") ** (self.dim / (self.dim - 2))
inv_freq = 1.0 / (base ** (torch.arange(0, self.dim, 2).float().to(device) / self.dim))
self.attention_scaling = 1.0
else:
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
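# Illustrative sketch (not part of the original file): a minimal standalone reproduction of the
# "dynamic" NTK-alpha branch above. The values rope_theta=10000.0, alpha=1000.0 and dim=128 are
# assumptions chosen only to make the arithmetic concrete; enlarging the base stretches the
# rotary period and therefore the usable context length.
def _ntk_alpha_inv_freq_example(rope_theta: float = 10000.0, alpha: float = 1000.0, dim: int = 128) -> torch.Tensor:
    base = rope_theta * alpha ** (dim / (dim - 2))  # same rescaling as in HunYuanDenseV1RotaryEmbedding.__init__
    return 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))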
class HunYuanDenseV1Model(LlamaModel):
pass
class HunYuanDenseV1ForCausalLM(LlamaForCausalLM):
pass
class HunYuanDenseV1ForSequenceClassification(LlamaForSequenceClassification):
pass
__all__ = [
"HunYuanDenseV1ForCausalLM",
"HunYuanDenseV1Model",
"HunYuanDenseV1PreTrainedModel",
"HunYuanDenseV1ForSequenceClassification",
]
| transformers/src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py/0 | {
"file_path": "transformers/src/transformers/models/hunyuan_v1_dense/modular_hunyuan_v1_dense.py",
"repo_id": "transformers",
"token_count": 3227
} | 475 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for IDEFICS.
"""
from typing import Callable, Optional, Union
from urllib.parse import urlparse
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import (
ImagesKwargs,
ProcessingKwargs,
ProcessorMixin,
TextKwargs,
Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import is_tf_available, is_torch_available
from ...utils.deprecation import deprecate_kwarg
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
IMAGE_TOKEN = "<image>"
class IdeficsImagesKwargs(ImagesKwargs, total=False):
transform: Optional[Callable]
image_size: Optional[dict[str, int]]
image_mean: Optional[Union[float, list[float]]]
image_std: Optional[Union[float, list[float]]]
class IdeficsTextKwargs(TextKwargs, total=False):
add_eos_token: Optional[bool]
add_end_of_utterance_token: Optional[bool]
class IdeficsProcessorKwargs(ProcessingKwargs, total=False):
text_kwargs: IdeficsTextKwargs
images_kwargs: IdeficsImagesKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": False,
"padding": "longest",
"add_eos_token": False,
},
"images_kwargs": {},
"common_kwargs": {"return_tensors": "pt"},
}
# copied from m4.training.packing
def incremental_to_binary_attention_mask(incremental_mask, return_tensors, num_classes=-1):
# Set elements >= num_classes to -1
if num_classes != -1:
if return_tensors == "pt":
incremental_mask[incremental_mask >= num_classes] = -1
elif return_tensors == "tf":
incremental_mask = tf.where(incremental_mask >= num_classes, -1, incremental_mask)
# Create mask for negative values
if return_tensors == "pt":
negatives = incremental_mask == -1
incremental_mask[negatives] = 0
attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
attn_mask[negatives, :] = 0
elif return_tensors == "tf":
negatives = tf.equal(incremental_mask, -1)
incremental_mask = tf.where(negatives, 0, incremental_mask)
attn_mask = tf.one_hot(incremental_mask, depth=num_classes)
# Reshape 'negatives' to add an extra dimension, making it [batch_size, seq_length, 1]
negatives_expanded = tf.expand_dims(negatives, -1)
attn_mask = tf.where(negatives_expanded, tf.zeros_like(attn_mask), attn_mask)
return attn_mask
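# Illustrative sketch (not part of the original file, assumes torch is available): with
# return_tensors="pt" and num_classes=2, a toy incremental mask of [[-1, 0, 0, 1]] (token 0
# precedes any image, tokens 1-2 follow image 0, token 3 follows image 1) is converted by the
# function above into the per-image binary mask [[[0, 0], [1, 0], [1, 0], [0, 1]]], i.e. one row
# per token and one column per image slot, with a leading batch dimension.
def _incremental_to_binary_example():
    incremental = torch.tensor([[-1, 0, 0, 1]])
    return incremental_to_binary_attention_mask(incremental, return_tensors="pt", num_classes=2)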
# copied from m4.training.packing
def image_attention_mask_for_packed_input_ids(input_ids, tokenizer, return_tensors):
if return_tensors == "pt":
return image_attention_mask_for_packed_input_ids_pt(input_ids, tokenizer)
elif return_tensors == "tf":
return image_attention_mask_for_packed_input_ids_tf(input_ids, tokenizer)
def image_attention_mask_for_packed_input_ids_pt(input_ids, tokenizer):
image_attention_mask = torch.full_like(input_ids, fill_value=-1)
next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
eod_token_id = tokenizer.eos_token_id
for batch_idx in range(input_ids.size(0)):
count = -1
seen_eod = False
for idx, token_id in enumerate(input_ids[batch_idx]):
if token_id == image_token_id:
count += 1
image_attention_mask[batch_idx][idx] = count
seen_eod = False
else:
image_attention_mask[batch_idx][idx] = count
if seen_eod:
image_attention_mask[batch_idx][idx] = -1
if token_id == eod_token_id:
seen_eod = True
for batch_idx in range(input_ids.size(0)):
count = -1
seen_eod = False
for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
token_id = input_ids[batch_idx][idx]
if token_id == image_token_id:
count += 1
next_image_attention_mask[batch_idx][idx] = count
seen_eod = False
else:
next_image_attention_mask[batch_idx][idx] = count
if token_id == eod_token_id:
seen_eod = True
if seen_eod:
next_image_attention_mask[batch_idx][idx] = -1
non_negative_indices = next_image_attention_mask[batch_idx] != -1
next_image_attention_mask[batch_idx][non_negative_indices] -= count
next_image_attention_mask[batch_idx][non_negative_indices] *= -1
return image_attention_mask, next_image_attention_mask
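# Illustrative note (not part of the original file): for a toy packed sequence
# [t, <image>, t, t, <image>, t] the first loop above produces the incremental mask
# [-1, 0, 0, 0, 1, 1], i.e. each token is tagged with the index of the most recently seen image
# (or -1 before the first image / after an EOS token), which is exactly the input expected by
# `incremental_to_binary_attention_mask`.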
def image_attention_mask_for_packed_input_ids_tf(input_ids, tokenizer):
image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
eod_token_id = tokenizer.eos_token_id
batch_size = tf.shape(input_ids)[0]
image_attention_mask = tf.fill(tf.shape(input_ids), -1)
next_image_attention_mask = tf.fill(tf.shape(input_ids), -1)
for batch_idx in range(batch_size):
count = -1
seen_eod = False
seq_length = tf.shape(input_ids)[1]
for idx in range(seq_length - 1, -1, -1):
token_id = input_ids[batch_idx, idx].numpy()
if token_id == image_token_id:
count += 1
indices = [[batch_idx, idx]]
updates = [count]
image_attention_mask = tf.tensor_scatter_nd_update(image_attention_mask, indices, updates)
next_image_attention_mask = tf.tensor_scatter_nd_update(next_image_attention_mask, indices, updates)
elif token_id == eod_token_id and not seen_eod:
seen_eod = True
count = 0
indices = [[batch_idx, idx]]
updates = [count]
next_image_attention_mask = tf.tensor_scatter_nd_update(next_image_attention_mask, indices, updates)
if seen_eod and token_id != eod_token_id:
indices = [[batch_idx, idx]]
updates = [-1]
next_image_attention_mask = tf.tensor_scatter_nd_update(next_image_attention_mask, indices, updates)
return image_attention_mask, next_image_attention_mask
def is_url(string):
"""Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
invalidated the url"""
if " " in string:
return False
result = urlparse(string)
return all([result.scheme, result.netloc])
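# Illustrative note (not part of the original file): `is_url` only accepts strings that parse to
# both a scheme and a network location and that contain no spaces, e.g.
#     is_url("https://example.com/cat.jpg")  -> True
#     is_url("example.com/cat.jpg")          -> False  (no scheme)
#     is_url("https://example.com/a b.jpg")  -> False  (contains a space)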
class IdeficsProcessor(ProcessorMixin):
r"""
    Constructs an IDEFICS processor which wraps a LLaMA tokenizer and an IDEFICS image processor into a single processor.
[`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
Args:
image_processor (`IdeficsImageProcessor`):
An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
tokenizer (`LlamaTokenizerFast`):
An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
image_size (`int`, *optional*, defaults to 224):
Image size (assuming a square image)
add_end_of_utterance_token (`str`, *optional*):
            The string representation of the token marking the end of an utterance
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "IdeficsImageProcessor"
tokenizer_class = "LlamaTokenizerFast"
def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
self.image_token_id = (
tokenizer.image_token_id
if hasattr(tokenizer, "image_token")
else tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
)
self.default_image_dims = (
self.image_processor.image_num_channels,
self.image_processor.image_size,
self.image_processor.image_size,
)
self.tokenizer_was_trained_with_end_of_utterance_token = (
"<end_of_utterance>" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
)
@deprecate_kwarg(old_name="prompts", version="5.0.0", new_name="text", raise_if_both_names=True)
def __call__(
self,
images: Union[ImageInput, list[ImageInput], str, list[str], list[list[str]]] = None,
text: Union[
TextInput,
PreTokenizedInput,
list[TextInput],
list[PreTokenizedInput],
list[list[TextInput]],
list[list[PreTokenizedInput]],
] = None,
audio=None,
videos=None,
**kwargs: Unpack[IdeficsProcessorKwargs],
) -> BatchFeature:
"""This method takes batched or non-batched prompts made of text and images and converts them into prompts that
the model was trained on and prepares the image pixel values for the model to process.
Args:
images (`Union[ImageInput, list[ImageInput], str, list[str], list[list[str]]]`):
either a single image or a batched list of images - can be passed in when text contains only text prompts,
in order to use the image-text-to-text behavior.
            text (`Union[list[TextInput], list[list[TextInput]]]`):
either a single prompt or a batched list of prompts - see the detailed description immediately after
the end of the arguments doc section.
return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
The type of tensors to return. Can be one of:
- `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
Returns:
a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
directly passed to `model.generate`
Detailed explanation:
Each entry in `text` is either a text to be passed as is or an image that will be processed.
An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
        When the processor encounters an image, it injects a `<fake_token_around_image><image><fake_token_around_image>`
        entry into the prompt.
Example:
```python
checkpoint = "HuggingFaceM4/idefics-9b"
processor = AutoProcessor.from_pretrained(checkpoint)
url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
img = processor.image_processor.fetch_images([url])[0]
prompts = [
"User:",
img,
"Describe this image.\nAssistant: An image of two kittens in grass.\n",
"User:",
"https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
"Describe this image.\nAssistant:",
]
inputs = processor(text=prompts, return_tensors="pt")
generated_ids = model.generate(**inputs, max_length=100)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
```
In this example the `prompts` will be converted into:
```
<s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
Assistant: An image of two kittens in grass.
User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
        Assistant:
```
and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
`pixel_values` dict entry of the return value.
This example also exemplifies that images can be passed as objects or as text urls. It can be seen that the
first image is passed as object and the second one as a url.
        To run training, do:
```python
image_transform = transforms.Compose(
[
transforms.RandomResizedCrop(
(w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.ToTensor(),
transforms.Normalize(mean=self.image_mean, std=self.image_std),
]
)
inputs = processor(text=prompts, transform=image_transform, return_tensors="pt")
```
        To help debug prompt generation, enable `debug=True`, which will show you what's happening.
"""
if images is None and text is None:
raise ValueError("You need to specify either `text` or `images` and `text`.")
if images is None:
# assuming the user wants to use the old behavior with prompts as the only argument
prompts = text
elif text is not None:
# Assuming image-text-to-text behavior:
# Check if batched images are provided
if not isinstance(images, (list, tuple)):
images = [images]
if isinstance(text, str):
text = [text]
# Check if batched images and text are in the correct format
if isinstance(text, (list, tuple)) and len(text) != len(images):
raise ValueError(
"When providing both images and text arguments, the number of text prompts should be the same as the number of images."
"If you want to have several images per prompt, images should be nested as such: images=[[img1, img2], [img3, img4], ...] for text=[prompt1, prompt2, ...]."
)
# Check that only text is present in the prompts
if not all(isinstance(i, str) for i in text):
raise ValueError("When using the image-text-to-text behavior, the prompts should only contain text.")
if isinstance(images[0], (list, tuple)):
# if nested images, nest text as well
text = [[i] for i in text]
prompts = list(zip(images, text))
output_kwargs = self._merge_kwargs(
IdeficsProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
add_eos_token = output_kwargs["text_kwargs"].pop("add_eos_token", False)
add_end_of_utterance_token = output_kwargs["text_kwargs"].pop("add_end_of_utterance_token", None)
# if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
if add_end_of_utterance_token is None:
add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
# turn non-batched prompts into batched
if not any(isinstance(i, (list, tuple)) for i in prompts):
prompts = [prompts]
fake_token = "<fake_token_around_image>"
image_token = "<image>"
end_of_utterance_token = "<end_of_utterance>"
def image_tokens(last_was_image):
if last_was_image:
return image_token + fake_token
else:
return fake_token + image_token + fake_token
all_prompts = []
all_images = []
for sample in prompts:
# the model was trained on samples starting with <s>
full_text = f"{self.tokenizer.bos_token}"
# an image can either be an image object in the item or the url, everything else is a verbatim prompt text
image_objects = []
last_was_image = False
last_was_text = False
for i, item in enumerate(sample):
if i > 0:
last_was_text = bool(not last_was_image)
if isinstance(item, str):
item = item.strip(" ")
if is_url(item):
image = self.image_processor.fetch_images(item)
full_text += image_tokens(last_was_image)
image_objects.append(image)
last_was_image = True
else:
# we add end_of_utterance_token between each subsequent text prompts (but not at the last one!)
if add_end_of_utterance_token and last_was_text:
full_text += end_of_utterance_token
full_text += item
last_was_image = False
else:
# must be an image obj
full_text += image_tokens(last_was_image)
image_objects.append(item)
last_was_image = True
if add_eos_token:
full_text += self.tokenizer.eos_token
image_objects = self.image_processor(image_objects, **output_kwargs["images_kwargs"])
all_prompts.append(full_text)
all_images.append(image_objects)
# For BC
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", "pt")
text_encoding = self.tokenizer(all_prompts, **output_kwargs["text_kwargs"])
all_texts = text_encoding["input_ids"]
all_attention_masks = text_encoding["attention_mask"]
# max_num_images has to be at least 1 even when there are no images
max_num_images = max(len(x) for x in all_images)
max_num_images = max(1, max_num_images)
at_least_one_image = sum(len(x) for x in all_images) > 0
output_input_ids = []
output_images = []
output_attention_masks = []
for text_single, attention_mask, extracted_images in zip(all_texts, all_attention_masks, all_images):
padded_input_ids = text_single
image_count = padded_input_ids.count(self.image_token_id)
local_max_num_images = min(image_count, max_num_images)
current_images = extracted_images[:local_max_num_images]
if len(current_images) > 0:
if return_tensors == "pt":
padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
padded_image_tensor[: current_images.size(0)] = current_images
elif return_tensors == "tf":
# Assuming current_images is a TensorFlow tensor
# Get the shape of current_images, excluding the first dimension
image_shape = tf.shape(current_images)[1:]
# Create a shape for the padded_image_tensor
padded_shape = tf.concat([[max_num_images], image_shape], axis=0)
# Create the padded_image_tensor of zeros
padded_image_tensor = tf.zeros(padded_shape, dtype=current_images.dtype)
# Get the number of images (assuming current_images has shape [num_images, height, width, channels])
num_images = tf.shape(current_images)[0]
# Update the padded_image_tensor with the values from current_images
indices = tf.reshape(tf.range(num_images), (-1, 1))
updates = current_images
padded_image_tensor = tf.tensor_scatter_nd_update(padded_image_tensor, indices, updates)
else:
if return_tensors == "pt":
padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
elif return_tensors == "tf":
padded_image_tensor = tf.zeros((max_num_images, *self.default_image_dims))
output_images.append(padded_image_tensor)
if return_tensors == "pt":
output_input_ids.append(torch.tensor(padded_input_ids))
output_attention_masks.append(torch.tensor(attention_mask))
elif return_tensors == "tf":
output_input_ids.append(tf.convert_to_tensor(padded_input_ids, dtype=tf.int32))
output_attention_masks.append(attention_mask)
if return_tensors == "pt":
output_input_ids = torch.stack(output_input_ids)
output_images = torch.stack(output_images)
output_attention_masks = torch.stack(output_attention_masks)
elif return_tensors == "tf":
output_input_ids = tf.stack(output_input_ids)
output_images = tf.stack(output_images)
output_attention_masks = tf.stack(output_attention_masks)
if at_least_one_image:
image_attention_mask, _ = image_attention_mask_for_packed_input_ids(
output_input_ids, self.tokenizer, return_tensors
)
image_attention_mask = incremental_to_binary_attention_mask(
image_attention_mask, return_tensors, num_classes=max_num_images
)
else:
# in full language mode we set the image mask to all-0s
if return_tensors == "pt":
image_attention_mask = torch.zeros(
output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
)
elif return_tensors == "tf":
image_attention_mask = tf.zeros(
(output_input_ids.shape[0], output_input_ids.shape[1], 1), dtype=tf.bool
)
return BatchFeature(
data={
"input_ids": output_input_ids,
"attention_mask": output_attention_masks,
"pixel_values": output_images,
"image_attention_mask": image_attention_mask,
}
)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(tokenizer_input_names + image_processor_input_names + ["image_attention_mask"])
__all__ = ["IdeficsProcessor"]
| transformers/src/transformers/models/idefics/processing_idefics.py/0 | {
"file_path": "transformers/src/transformers/models/idefics/processing_idefics.py",
"repo_id": "transformers",
"token_count": 10331
} | 476 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Idefics3.
"""
import re
from itertools import accumulate
from typing import TYPE_CHECKING, Optional, Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, is_valid_image, load_image
from ...processing_utils import ImagesKwargs, MultiModalData, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import AddedToken, BatchEncoding, TextInput
from ...utils import logging
if TYPE_CHECKING:
from ...tokenization_utils_base import PreTokenizedInput
logger = logging.get_logger(__name__)
def is_url(val) -> bool:
return isinstance(val, str) and val.startswith("http")
def is_image_or_image_url(elem):
return is_url(elem) or is_valid_image(elem)
def _prompt_split_image(image_seq_len, image_rows, image_cols, fake_token_around_image, image_token, global_img_token):
"""Prompt with expanded image tokens for when the image is split into patches."""
text_split_images = ""
for n_h in range(image_rows):
for n_w in range(image_cols):
text_split_images += (
f"{fake_token_around_image}" + f"<row_{n_h + 1}_col_{n_w + 1}>" + f"{image_token}" * image_seq_len
)
text_split_images += "\n"
text_split_images += (
f"\n{fake_token_around_image}"
+ f"{global_img_token}"
+ f"{image_token}" * image_seq_len
+ f"{fake_token_around_image}"
)
return text_split_images
def _prompt_single_image(image_seq_len, fake_token_around_image, image_token, global_img_token):
"""Prompt with expanded image tokens for a single image."""
return (
f"{fake_token_around_image}"
+ f"{global_img_token}"
+ f"{image_token}" * image_seq_len
+ f"{fake_token_around_image}"
)
def get_image_prompt_string(
image_rows, image_cols, image_seq_len, fake_token_around_image, image_token, global_img_token
):
if image_rows == 0 and image_cols == 0:
return _prompt_single_image(
image_seq_len,
fake_token_around_image=fake_token_around_image,
image_token=image_token,
global_img_token=global_img_token,
)
return _prompt_split_image(
image_seq_len, image_rows, image_cols, fake_token_around_image, image_token, global_img_token
)
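# Illustrative sketch (not part of the original file): a minimal call showing the prompt produced
# for a single, unsplit image with an assumed image_seq_len of 2. Split images additionally prepend
# one "<row_r_col_c>"-tagged tile block per patch before the global image block.
def _image_prompt_string_example() -> str:
    return get_image_prompt_string(
        image_rows=0,
        image_cols=0,
        image_seq_len=2,
        fake_token_around_image="<fake_token_around_image>",
        image_token="<image>",
        global_img_token="<global-img>",
    )  # -> "<fake_token_around_image><global-img><image><image><fake_token_around_image>"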
class Idefics3ImagesKwargs(ImagesKwargs, total=False):
return_row_col_info: Optional[bool]
max_image_size: Optional[dict[str, int]]
class Idefics3ProcessorKwargs(ProcessingKwargs, total=False):
images_kwargs: Idefics3ImagesKwargs
_defaults = {
"text_kwargs": {
"add_special_tokens": True,
"padding": False,
"is_split_into_words": False,
"return_mm_token_type_ids": False,
},
"images_kwargs": {
"return_row_col_info": True,
},
}
Idefics3ProcessorKwargs.__annotations__["images_kwargs"] = Idefics3ImagesKwargs # python 3.8 compatibility
class Idefics3Processor(ProcessorMixin):
r"""
    Constructs an Idefics3 processor which wraps a LLaMA tokenizer and an Idefics3 image processor into a single processor.
    [`Idefics3Processor`] offers all the functionalities of [`Idefics3ImageProcessor`] and [`Idefics3TokenizerFast`]. See
    the docstring of [`~Idefics3Processor.__call__`] and [`~Idefics3Processor.decode`] for more information.
Args:
image_processor (`Idefics3ImageProcessor`):
An instance of [`Idefics3ImageProcessor`]. The image processor is a required input.
tokenizer (`PreTrainedTokenizerBase`, *optional*):
An instance of [`PreTrainedTokenizerBase`]. This should correspond with the model's text model. The tokenizer is a required input.
image_seq_len (`int`, *optional*, defaults to 169):
The length of the image sequence i.e. the number of <image> tokens per image in the input.
This parameter is used to build the string from the input prompt and image tokens and should match the
value the model used. It is computed as: image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "Idefics3ImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(
self, image_processor, tokenizer=None, image_seq_len: int = 169, chat_template: Optional[str] = None, **kwargs
):
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
self.fake_image_token = AddedToken("<fake_token_around_image>", normalized=False, special=True).content
self.image_token = AddedToken("<image>", normalized=False, special=True).content
self.end_of_utterance_token = AddedToken("<end_of_utterance>", normalized=False, special=True).content
self.global_image_tag = "<global-img>" # https://github.com/huggingface/transformers/pull/32473/files/8063e5e17362571b693f1db95167f5443a3be1b2#r1734825341
self.image_seq_len = image_seq_len
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.fake_image_token_id = tokenizer.convert_tokens_to_ids(self.fake_image_token)
self.global_image_token_id = tokenizer.convert_tokens_to_ids(self.global_image_tag)
self.row_col_ids = [
tokenizer.convert_tokens_to_ids(f"<row_{i + 1}_col_{j + 1}>") for i in range(6) for j in range(6)
]
# This regex matches one or more occurrences of <global-img> tags (optionally surrounded by newline characters)
# or <row_x_col_y> tags (where x and y are digits, also optionally surrounded by newline characters).
self._regex_to_remove_extra_special_tokens = re.compile(r"(\n?<global-img>\n?|<row_\d+_col_\d+>\n?)+")
tokens_to_add = {
"additional_special_tokens": [
self.fake_image_token,
self.image_token,
self.end_of_utterance_token,
]
}
tokenizer.add_special_tokens(tokens_to_add)
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
super().__init__(image_processor, tokenizer, chat_template=chat_template, **kwargs)
def _extract_images_from_prompts(self, prompts):
prompt_images = []
for prompt in prompts:
images = []
for elem in prompt:
if is_valid_image(elem):
images.append(elem)
elif is_url(elem):
images.append(load_image(elem))
prompt_images.append(images)
return prompt_images
def __call__(
self,
images: Union[ImageInput, list[ImageInput], list[list[ImageInput]]] = None,
text: Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]] = None,
audio=None,
videos=None,
image_seq_len: Optional[int] = None,
**kwargs: Unpack[Idefics3ProcessorKwargs],
) -> BatchEncoding:
"""
Processes the input prompts and returns a BatchEncoding.
Example:
```python
>>> import requests
>>> from transformers import Idefics3Processor
>>> from transformers.image_utils import load_image
>>> processor = Idefics3Processor.from_pretrained("HuggingFaceM4/Idefics3-8B-Llama3")
>>> processor.image_processor.do_image_splitting = False # Force as False to simplify the example
>>> url1 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> url2 = "https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg"
>>> image1, image2 = load_image(url1), load_image(url2)
>>> images = [[image1], [image2]]
>>> text = [
... "<image>In this image, we see",
... "bla bla bla<image>",
... ]
>>> outputs = processor(images=images, text=text, return_tensors="pt", padding=True)
>>> input_ids = outputs.input_ids
>>> input_tokens = processor.tokenizer.batch_decode(input_ids)
>>> print(input_tokens)
['<|begin_of_text|><fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image> In this image, we see', '<|reserved_special_token_0|><|reserved_special_token_0|><|reserved_special_token_0|><|begin_of_text|>bla bla bla<fake_token_around_image><global-img>((<image>)*169)<fake_token_around_image>']
```
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, *optional*):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. If is of type `list[ImageInput]`, it's assumed that this is for a single prompt i.e. of batch size 1.
text (`Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]]`, *optional*):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
                Wherever an image token, `<image>`, is encountered, it is expanded to
                `<fake_token_around_image>` + `<row_x_col_y>` + `<image>` * `image_seq_len` + `<fake_token_around_image>`.
image_seq_len (`int`, *optional*):
The length of the image sequence. If not provided, the default value of self.image_seq_len is used.
image_seq_len should be equal to int(((image_size // patch_size) ** 2) / (scale_factor**2))
return_tensors (`Union[str, TensorType]`, *optional*):
If set, will return tensors of a particular framework. See [`PreTrainedTokenizerFast.__call__`] for more
information.
"""
if text is None and images is None:
raise ValueError("You must provide either `text` or `images`.")
output_kwargs = self._merge_kwargs(
Idefics3ProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
image_seq_len = image_seq_len if image_seq_len is not None else self.image_seq_len
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
n_images_in_text = []
n_images_in_images = []
inputs = {}
if text is not None:
if isinstance(text, str):
text = [text]
            elif not isinstance(text, list) or not isinstance(text[0], str):
raise ValueError("Invalid input text. Please provide a string, or a list of strings")
n_images_in_text = [sample.count(self.image_token) for sample in text]
if images is not None:
if is_image_or_image_url(images):
images = [[images]]
elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):
if text is not None:
if sum(n_images_in_text) != len(images):
raise ValueError(
f"The total number of {self.image_token} tokens in the prompts should be the same as the number of images passed."
f" Found {sum(n_images_in_text)} {self.image_token} tokens and {len(images)} images."
)
# Reorganize the images to match the prompts
cumsum_images_in_text = [0] + list(accumulate(n_images_in_text))
images = [
images[cumsum_images_in_text[i] : cumsum_images_in_text[i + 1]]
for i in range(len(n_images_in_text))
]
else:
images = [images]
elif (
not isinstance(images, (list, tuple))
and not isinstance(images[0], (list, tuple))
and not is_image_or_image_url(images[0][0])
):
raise ValueError(
"Invalid input images. Please provide a single image or a list of images or a list of list of images."
)
n_images_in_images = [len(sample) for sample in images]
# Load images if they are URLs
images = [[load_image(im) if is_url(im) else im for im in sample] for sample in images]
image_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
inputs.update(image_inputs)
if text is not None:
if n_images_in_images != n_images_in_text:
raise ValueError(
f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
)
image_rows = inputs.pop("rows", [[0] * len(text)])
image_cols = inputs.pop("cols", [[0] * len(text)])
fake_image_token = self.fake_image_token
image_token = self.image_token
global_img_token = self.global_image_tag
prompt_strings = []
batch_image_seq_lengths = []
for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols):
# Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
image_prompt_strings = []
image_seq_lengths = []
for n_rows, n_cols in zip(sample_rows, sample_cols):
image_prompt_string = get_image_prompt_string(
n_rows,
n_cols,
image_seq_len,
image_token=image_token,
fake_token_around_image=fake_image_token,
global_img_token=global_img_token,
)
# Add +2 and +3 for special BOI/EOI/fake_image_wrapper tokens
row_length = (self.image_seq_len + 2) * n_cols + 1
image_seq_lengths.append((self.image_seq_len + 3) + row_length * n_rows)
image_prompt_strings.append(image_prompt_string)
batch_image_seq_lengths.append(image_seq_lengths)
split_sample = sample.split(image_token)
if len(split_sample) == 0:
raise ValueError("The image token should be present in the text.")
# Place in the image prompt strings where the image tokens are
sample = split_sample[0]
for i, image_prompt_string in enumerate(image_prompt_strings):
sample += image_prompt_string + split_sample[i + 1]
prompt_strings.append(sample)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
inputs.update(text_inputs)
elif text is not None:
if any(n_images_in_text):
raise ValueError(
f"Found {sum(n_images_in_text)} {self.image_token} tokens in the text but no images were passed."
)
text_inputs = self.tokenizer(text=text, **output_kwargs["text_kwargs"])
inputs.update(text_inputs)
if return_mm_token_type_ids:
array_ids = np.array(inputs["input_ids"])
mm_token_type_ids = np.zeros_like(array_ids)
for i, seq_lengths in enumerate(batch_image_seq_lengths):
image_start_positions = np.where(array_ids[i] == self.fake_image_token_id)[0]
j = 0
for seq_len in seq_lengths:
if j >= len(image_start_positions):
break
start = image_start_positions[j]
end = start + seq_len
mm_token_type_ids[i, start:end] = 1
j = np.searchsorted(image_start_positions, end)
inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data=inputs, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Idefics3ProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
num_image_row_cols = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
base_image_length = self.image_seq_len + 3
col_length = self.image_seq_len + 2
num_image_tokens = []
num_image_patches = []
for num_patches, num_rows, num_cols in num_image_row_cols:
row_length = col_length * num_cols + 1
num_image_tokens.append(base_image_length + (row_length * num_rows))
num_image_patches.append(num_patches)
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
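# Illustrative note (not part of the original file): with the default image_seq_len of 169, an image
# split into 2 rows and 2 columns yields, following the formula in `_get_num_multimodal_tokens`,
# col_length = 169 + 2 = 171, row_length = 171 * 2 + 1 = 343, and
# num_image_tokens = (169 + 3) + 343 * 2 = 858 placeholder tokens for that image.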
__all__ = ["Idefics3Processor"]
| transformers/src/transformers/models/idefics3/processing_idefics3.py/0 | {
"file_path": "transformers/src/transformers/models/idefics3/processing_idefics3.py",
"repo_id": "transformers",
"token_count": 8665
} | 477 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/internvl/modular_internvl.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_internvl.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
from dataclasses import dataclass
from typing import Callable, Optional, Union
import torch
import torch.nn as nn
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast, BaseModelOutputWithPooling
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import ModelOutput, TransformersKwargs, auto_docstring, can_return_tuple, torch_int
from ..auto import AutoModel
from .configuration_internvl import InternVLConfig, InternVLVisionConfig
@use_kernel_forward_from_hub("RMSNorm")
class InternVLVisionRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
InternVLVisionRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = key
value_states = value
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# No upcasting of the attention weights to float32 in this implementation
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class InternVLVisionAttention(nn.Module):
"""Attention Class for InternVL Vision Encoder"""
def __init__(self, config: InternVLVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
proj_dropout = config.projection_dropout
qk_norm = config.use_qk_norm
# Needed for flash attention
self.is_causal = False
self.q_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias)
self.k_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias)
self.v_proj = nn.Linear(self.embed_dim, self.num_heads * self.head_dim, bias=config.attention_bias)
self.projection_layer = nn.Linear(self.embed_dim, self.embed_dim)
self.projection_dropout = nn.Dropout(proj_dropout) if proj_dropout > 0 else nn.Identity()
self.q_norm = InternVLVisionRMSNorm(self.embed_dim) if qk_norm else nn.Identity()
self.k_norm = InternVLVisionRMSNorm(self.embed_dim) if qk_norm else nn.Identity()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
**kwargs: Unpack[FlashAttentionKwargs],
):
batch_size, seq_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = self.q_norm(query_states)
key_states = self.k_norm(key_states)
query_states = query_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.reshape(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scale,
is_causal=False,
**kwargs,
)
attn_output = attn_output.reshape(batch_size, seq_len, self.embed_dim)
output = self.projection_layer(attn_output)
output = self.projection_dropout(output)
outputs = (output, attn_weights) if output_attentions else (output, None)
return outputs
@auto_docstring
class InternVLVisionPreTrainedModel(PreTrainedModel):
config: InternVLVisionConfig
base_model_prefix = "internvl_vision"
main_input_name = "pixel_values"
supports_gradient_checkpointing = True
_no_split_modules = ["InternVLVisionLayer"]
_supports_sdpa = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_attention_backend = True
def _init_weights(self, module):
"""Initialize the weights"""
super()._init_weights(module)
if isinstance(module, InternVLVisionEmbeddings):
module.cls_token.data.zero_()
if module.mask_token is not None:
module.mask_token.data.zero_()
if module.position_embeddings is not None:
module.position_embeddings.data.zero_()
elif isinstance(module, InternVLVisionLayer):
module.lambda_1.data.fill_(self.config.layer_scale_init_value)
module.lambda_2.data.fill_(self.config.layer_scale_init_value)
@dataclass
@auto_docstring(
custom_intro="""
Class for outputs of [`InternVLVisionModel`].
"""
)
class InternVLVisionModelOutputWithPooling(BaseModelOutputWithPooling):
r"""
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Average of the last layer hidden states of the patch tokens (excluding the *[CLS]* token) if
*config.use_mean_pooling* is set to True. If set to False, then the final hidden state of the *[CLS]* token
will be returned.
"""
class InternVLVisionPatchEmbeddings(nn.Module):
"""
This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
`hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
Transformer.
"""
def __init__(self, config):
super().__init__()
image_size, patch_size = config.image_size, config.patch_size
num_channels, hidden_size = config.num_channels, config.hidden_size
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
patch_shape = (image_size[0] // patch_size[0], image_size[1] // patch_size[1])
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_patches = num_patches
self.patch_shape = patch_shape
self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=patch_size)
def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
batch_size, num_channels, height, width = pixel_values.shape
if num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration."
)
embeddings = self.projection(pixel_values)
patch_height, patch_width = embeddings.shape[2], embeddings.shape[3]
embeddings = embeddings.flatten(2).transpose(1, 2)
return embeddings, (patch_height, patch_width)
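# Illustrative note (not part of the original file): assuming a 448x448 input and a 14x14 patch
# size, the convolution above yields (448 // 14) * (448 // 14) = 32 * 32 = 1024 patch embeddings
# per image, each of dimension `hidden_size`.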
# Based on timm implementation, which can be found here:
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
class InternVLVisionEmbeddings(nn.Module):
"""
Construct the CLS token, position and patch embeddings. Optionally, also the mask token.
"""
def __init__(self, config: InternVLVisionConfig) -> None:
super().__init__()
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
if config.use_mask_token:
self.mask_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
else:
self.mask_token = None
self.patch_embeddings = InternVLVisionPatchEmbeddings(config)
self.patch_size = config.patch_size
self.image_size = (
config.image_size
if isinstance(config.image_size, collections.abc.Iterable)
else (config.image_size, config.image_size)
)
num_patches = self.patch_embeddings.num_patches
if config.use_absolute_position_embeddings:
self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 1, config.hidden_size))
else:
self.position_embeddings = None
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
num_positions = self.position_embeddings.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embeddings
class_pos_embed = self.position_embeddings[:, :1]
patch_pos_embed = self.position_embeddings[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size[0]
new_width = width // self.patch_size[1]
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
) -> torch.Tensor:
_, _, height, width = pixel_values.shape
embeddings, (patch_height, patch_width) = self.patch_embeddings(pixel_values)
batch_size, seq_len, _ = embeddings.size()
if bool_masked_pos is not None:
mask_tokens = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_tokens
w = bool_masked_pos.unsqueeze(-1).type_as(mask_tokens)
embeddings = embeddings * (1 - w) + mask_tokens * w
cls_tokens = self.cls_token.expand(batch_size, -1, -1)
embeddings = torch.cat((cls_tokens, embeddings), dim=1)
if self.position_embeddings is not None:
embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
embeddings = self.dropout(embeddings)
return embeddings, (patch_height, patch_width)
class InternVLVisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.activation_fn = ACT2FN[config.hidden_act]
self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.fc1(hidden_states)
hidden_states = self.activation_fn(hidden_states)
hidden_states = self.fc2(hidden_states)
return hidden_states
NORM2FN = {"layer_norm": nn.LayerNorm, "rms_norm": InternVLVisionRMSNorm}
class InternVLVisionLayer(GradientCheckpointingLayer):
"""This corresponds to the Block class in the timm implementation."""
def __init__(self, config: InternVLVisionConfig) -> None:
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = InternVLVisionAttention(config)
self.mlp = InternVLVisionMLP(config)
# InternVL uses different layernorm implementations for different models
self.layernorm_before = NORM2FN[config.norm_type](config.hidden_size, eps=config.layer_norm_eps)
self.layernorm_after = NORM2FN[config.norm_type](config.hidden_size, eps=config.layer_norm_eps)
init_values = config.layer_scale_init_value
self.lambda_1 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
self.lambda_2 = nn.Parameter(init_values * torch.ones(config.hidden_size), requires_grad=True)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
) -> Union[tuple[torch.Tensor], tuple[torch.Tensor, torch.Tensor]]:
attention_output, attention_weights = self.attention(
self.layernorm_before(hidden_states), # in InternVLVision, layernorm is applied before self-attention
output_attentions=output_attentions,
)
attention_output = self.lambda_1 * attention_output
# first residual connection
hidden_states = attention_output + hidden_states
# in InternVLVision, layernorm is also applied after self-attention
layer_output = self.layernorm_after(hidden_states)
layer_output = self.mlp(layer_output)
layer_output = self.dropout(layer_output)
if self.lambda_2 is not None:
layer_output = self.lambda_2 * layer_output
# second residual connection
layer_output = layer_output + hidden_states
return layer_output, attention_weights
class InternVLVisionEncoder(nn.Module):
def __init__(self, config: InternVLVisionConfig) -> None:
super().__init__()
self.config = config
self.layer = nn.ModuleList([InternVLVisionLayer(config) for i in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
hidden_states: torch.Tensor,
output_attentions: bool = False,
output_hidden_states: bool = False,
) -> Union[tuple, BaseModelOutput]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, output_attentions)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
class InternVLVisionModel(InternVLVisionPreTrainedModel):
def __init__(self, config: InternVLVisionConfig) -> None:
super().__init__(config)
self.config = config
self.embeddings = InternVLVisionEmbeddings(config)
self.encoder = InternVLVisionEncoder(config)
self.layernorm = (
nn.Identity() if config.use_mean_pooling else nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.patch_embeddings
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: torch.Tensor,
bool_masked_pos: Optional[torch.BoolTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> Union[tuple, InternVLVisionModelOutputWithPooling]:
r"""
bool_masked_pos (`torch.BoolTensor` of shape `(batch_size, num_patches)`, *optional*):
Boolean masked positions. Indicates which patches are masked (1) and which aren't (0).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
embedding_output, _ = self.embeddings(pixel_values, bool_masked_pos=bool_masked_pos)
encoder_outputs = self.encoder(
embedding_output,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = encoder_outputs[0]
sequence_output = self.layernorm(sequence_output)
return InternVLVisionModelOutputWithPooling(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring
class InternVLPreTrainedModel(PreTrainedModel):
config: InternVLConfig
base_model_prefix = ""
supports_gradient_checkpointing = True
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_flex_attn = True
_supports_attention_backend = True
class InternVLMultiModalProjector(nn.Module):
def __init__(self, config: InternVLConfig):
super().__init__()
self.layer_norm = nn.LayerNorm(config.vision_config.hidden_size * int(1 / config.downsample_ratio) ** 2)
self.linear_1 = nn.Linear(
config.vision_config.hidden_size * int(1 / config.downsample_ratio) ** 2, config.text_config.hidden_size
)
self.act = ACT2FN[config.projector_hidden_act]
self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size)
def forward(self, image_features):
hidden_states = self.layer_norm(image_features)
hidden_states = self.linear_1(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.linear_2(hidden_states)
return hidden_states
@dataclass
@auto_docstring(
custom_intro="""
Base class for InternVL outputs, with hidden states and attentions.
"""
)
class InternVLModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The InternVL model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
class InternVLModel(InternVLPreTrainedModel):
_checkpoint_conversion_mapping = {"language_model.model": "language_model"}
def __init__(self, config: InternVLConfig):
super().__init__(config)
self.vision_tower = AutoModel.from_config(config.vision_config)
self.multi_modal_projector = InternVLMultiModalProjector(config)
self.language_model = AutoModel.from_config(config.text_config)
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_decoder(self, decoder):
self.language_model = decoder
def get_decoder(self):
return self.language_model
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
"""
Obtains image last hidden states from the vision tower and applies the multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
vision_feature_layer (`int` or `list[int]`):
Layer index or list of layer indices to extract features from.
Returns:
vision_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
pixel_values = pixel_values.to(dtype=self.dtype) # fp16 compatibility
downsample_ratio = self.config.downsample_ratio
if vision_feature_layer == -1:
vision_features = self.vision_tower(pixel_values=pixel_values).last_hidden_state
else:
vision_features = self.vision_tower(pixel_values=pixel_values).hidden_states[vision_feature_layer]
if vision_feature_select_strategy == "default":
vision_features = vision_features[:, 1:, :]
# Calculate dimensions based on vision features
channels = vision_features.shape[1]
feature_size = int(channels**0.5)
batch_size = vision_features.shape[0]
# Reshape tensor to spatial dimensions
vision_features = vision_features.reshape(batch_size, feature_size, feature_size, -1)
# Apply downsampling using pixel shuffle
vision_features = self.pixel_shuffle(vision_features, scale_factor=downsample_ratio)
# Reshape tensor to prepare for projection
vision_features = vision_features.reshape(batch_size, -1, vision_features.shape[-1])
# Project features through multi-modal projector
vision_features = self.multi_modal_projector(vision_features)
return vision_features
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains the multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
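# A minimal sketch of how the returned mask is consumed by `masked_scatter` in `forward` below, using toy
# shapes and an illustrative image token id of 9:
#
#     import torch
#
#     image_token_id, hidden = 9, 4
#     input_ids = torch.tensor([[1, 9, 9, 2]])                   # two image placeholder tokens
#     inputs_embeds = torch.zeros(1, 4, hidden)
#     image_features = torch.ones(2, 1, hidden)                  # 2 * 1 = 2 feature vectors to scatter
#     mask = (input_ids == image_token_id).unsqueeze(-1).expand_as(inputs_embeds)
#     merged = inputs_embeds.masked_scatter(mask, image_features)
#     print(merged[0, 1])  # tensor([1., 1., 1., 1.]) -- the placeholder positions now hold image features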
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, InternVLModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
return InternVLModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
def pixel_shuffle(self, vision_features: torch.Tensor, scale_factor: float = 0.5):
"""Perform pixel shuffle downsampling on vision features.
Args:
vision_features (`torch.Tensor`):
Input tensor of shape (batch_size, width, height, channels).
scale_factor (`float`, *optional*, defaults to `0.5`):
Factor by which to downsample. Default is 0.5, which halves the dimensions.
Returns:
vision_features (`torch.Tensor`):
Downsampled tensor of shape (batch_size, height*scale_factor, width*scale_factor, channels/(scale_factor^2)).
"""
batch_size, width, height, channels = vision_features.size()
if height % scale_factor != 0 or width % scale_factor != 0:
raise ValueError("Height and width must be divisible by scale_factor for proper downsampling.")
# Reshape to allow downsampling
vision_features = vision_features.view(
batch_size, width, int(height * scale_factor), int(channels / scale_factor)
)
# Permute dimensions to align downsampled axis correctly
vision_features = vision_features.permute(0, 2, 1, 3).contiguous()
# Reshape to achieve final downsampled dimensions
vision_features = vision_features.view(
batch_size, int(height * scale_factor), int(width * scale_factor), int(channels / (scale_factor**2))
)
# Swap height and width back for proper orientation
vision_features = vision_features.permute(0, 2, 1, 3).contiguous()
return vision_features
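# A minimal sketch of the shape transformation performed by `pixel_shuffle`, assuming a 32x32 grid of
# patch features with 1024 channels and the default `scale_factor=0.5`:
#
#     import torch
#
#     features = torch.randn(1, 32, 32, 1024)            # (batch_size, width, height, channels)
#     features = features.view(1, 32, 16, 2048)          # fold pairs along one spatial axis into channels
#     features = features.permute(0, 2, 1, 3).contiguous()
#     features = features.view(1, 16, 16, 4096)          # fold the other spatial axis as well
#     features = features.permute(0, 2, 1, 3).contiguous()
#     print(features.shape)                              # torch.Size([1, 16, 16, 4096])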
@dataclass
@auto_docstring(
custom_intro="""
Base class for InternVL causal language model (or autoregressive) outputs.
"""
)
class InternVLCausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`.
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@auto_docstring(
custom_intro="""
The InternVL model which consists of a vision backbone and a language model.
"""
)
class InternVLForConditionalGeneration(InternVLPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
"^language_model.model": "model.language_model",
"^vision_tower": "model.vision_tower",
"^multi_modal_projector": "model.multi_modal_projector",
"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config: InternVLConfig):
super().__init__(config)
self.model = InternVLModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
**kwargs,
)
# Make modules available through conditional class for BC
@property
def language_model(self):
return self.model.language_model
@property
def vision_tower(self):
return self.model.vision_tower
@property
def multi_modal_projector(self):
return self.model.multi_modal_projector
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, InternVLCausalLMOutputWithPast]:
r"""
Example:
```python
>>> import torch
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
>>> torch_device = "cuda"
>>> processor = AutoProcessor.from_pretrained("OpenGVLab/InternVL3-1B-hf")
>>> model = AutoModelForImageTextToText.from_pretrained(
... "OpenGVLab/InternVL3-1B-hf", dtype=torch.bfloat16, device_map=torch_device
... )
>>> messages = [
... {
... "role": "user",
... "content": [
... {
... "type": "image",
... "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
... },
... {
... "type": "image",
... "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg",
... },
... {"type": "text", "text": "These images depict two different landmarks. Can you identify them?"},
... ],
... },
... ]
>>> inputs = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(torch_device)
>>> generate_ids = model.generate(**inputs, max_new_tokens=200)
>>> print(processor.decode(generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True))
The images depict the Statue of Liberty and the Golden Gate Bridge.
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
image_sizes=image_sizes,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return InternVLCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# Pixel values should only be forwarded on the first (prefill) step; in the cached decoding stage the
# input ids no longer contain the special image tokens, so pixel values are not passed to the model
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = [
"InternVLVisionPreTrainedModel",
"InternVLVisionModel",
"InternVLPreTrainedModel",
"InternVLModel",
"InternVLForConditionalGeneration",
]
| transformers/src/transformers/models/internvl/modeling_internvl.py/0 | {
"file_path": "transformers/src/transformers/models/internvl/modeling_internvl.py",
"repo_id": "transformers",
"token_count": 17636
} | 478 |
# coding=utf-8
# Copyright 2024 JetMoe AI and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JetMoe model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class JetMoeConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`JetMoeModel`]. It is used to instantiate a
JetMoe model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a configuration similar to that of the JetMoe-4B.
[jetmoe/jetmoe-8b](https://huggingface.co/jetmoe/jetmoe-8b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the JetMoe model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`JetMoeModel`]
hidden_size (`int`, *optional*, defaults to 2048):
Dimension of the hidden representations.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_key_value_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each key and value in the Transformer encoder.
kv_channels (`int`, *optional*, defaults to 128):
Defines the number of channels for the key and value tensors.
intermediate_size (`int`, *optional*, defaults to 5632):
Dimension of the MLP representations.
max_position_embeddings (`int`, *optional*, defaults to 4096):
The maximum sequence length that this model might ever be used with. JetMoe's attention allows sequences of
up to 4096 tokens.
activation_function (`string`, *optional*, defaults to `"silu"`):
Defines the activation function for MLP experts.
num_local_experts (`int`, *optional*, defaults to 8):
Defines the number of experts in the MoE and MoA.
num_experts_per_tok (`int`, *optional*, defaults to 2):
The number of experts to route per token, for both the MoE and the MoA.
output_router_logits (`bool`, *optional*, defaults to `False`):
Whether or not the router logits should be returned by the model. Enabling this will also
allow the model to output the auxiliary loss.
aux_loss_coef (`float`, *optional*, defaults to 0.01):
The coefficient for the auxiliary loss.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
bos_token_id (`int`, *optional*, defaults to 1):
The id of the "beginning-of-sequence" token.
eos_token_id (`int`, *optional*, defaults to 2):
The id of the "end-of-sequence" token.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether the model's input and output word embeddings should be tied.
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rms_norm_eps (`float`, *optional*, defaults to 1e-06):
The epsilon used by the rms normalization layers.
initializer_range (`float`, *optional*, defaults to 0.01):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
```python
>>> from transformers import JetMoeModel, JetMoeConfig
>>> # Initializing a JetMoe 4B style configuration
>>> configuration = JetMoeConfig()
>>> # Initializing a model from the JetMoe 4B style configuration
>>> model = JetMoeModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "jetmoe"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=32000,
hidden_size=2048,
num_hidden_layers=12,
num_key_value_heads=16,
kv_channels=128,
intermediate_size=5632,
max_position_embeddings=4096,
activation_function="silu",
num_local_experts=8,
num_experts_per_tok=2,
output_router_logits=False,
aux_loss_coef=0.01,
use_cache=True,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=True,
rope_theta=10000.0,
rms_norm_eps=1e-6,
initializer_range=0.01,
attention_dropout=0.0,
**kwargs,
):
if num_experts_per_tok > num_local_experts:
raise ValueError("`num_experts_per_tok` must be less than or equal to `num_local_experts`")
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_key_value_heads * num_experts_per_tok
self.num_key_value_heads = num_key_value_heads
self.kv_channels = kv_channels
self.intermediate_size = intermediate_size
self.max_position_embeddings = max_position_embeddings
self.activation_function = activation_function
self.num_local_experts = num_local_experts
self.num_experts_per_tok = num_experts_per_tok
self.output_router_logits = output_router_logits
self.aux_loss_coef = aux_loss_coef
self.use_cache = use_cache
self.initializer_range = initializer_range
self.attention_dropout = attention_dropout
self.bos_token_id = bos_token_id
self.eos_token_id = eos_token_id
self.rope_theta = rope_theta
self.rms_norm_eps = rms_norm_eps
super().__init__(
bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
)
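# A minimal sketch of how the attention head count is derived from the MoA settings, using the default
# values above:
#
#     config = JetMoeConfig(num_key_value_heads=16, num_experts_per_tok=2)
#     print(config.num_attention_heads)  # 32, i.e. num_key_value_heads * num_experts_per_tok
#     print(config.kv_channels)          # 128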
__all__ = ["JetMoeConfig"]
| transformers/src/transformers/models/jetmoe/configuration_jetmoe.py/0 | {
"file_path": "transformers/src/transformers/models/jetmoe/configuration_jetmoe.py",
"repo_id": "transformers",
"token_count": 2647
} | 479 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
import re
import safetensors.torch
import sentencepiece
import torch
from transformers import (
KyutaiSpeechToTextConfig,
KyutaiSpeechToTextFeatureExtractor,
KyutaiSpeechToTextForConditionalGeneration,
KyutaiSpeechToTextProcessor,
PreTrainedTokenizerFast,
)
from transformers.convert_slow_tokenizer import MoshiConverter
from transformers.utils.hub import cached_file
# fmt: off
MOSHI_ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"out_norm": r"norm",
r"gating\.linear_in": r"mlp.fc1",
r"gating\.linear_out": r"mlp.fc2",
r"self_attn\.out_proj": r"self_attn.o_proj.linear",
r"norm1": r"input_layernorm",
r"norm2": r"post_attention_layernorm",
r"layer_scale_1": r"self_attn_layer_scale",
r"layer_scale_2": r"mlp_layer_scale",
r"alpha": r"weight",
}
# fmt: on
# fmt: off
MIMI_ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"conv\.conv\.conv": "conv",
r"convtr\.convtr\.convtr": "conv",
r"conv\.conv": "conv",
r"convtr\.convtr": "conv",
r"quantizer\.rvq_first\.vq": "quantizer.semantic_residual_vector_quantizer",
r"quantizer\.rvq_first": "quantizer.semantic_residual_vector_quantizer",
r"quantizer\.rvq_rest\.vq": "quantizer.acoustic_residual_vector_quantizer",
r"quantizer\.rvq_rest": "quantizer.acoustic_residual_vector_quantizer",
r"_codebook": "codebook",
r"_initialized": "initialized",
r"embedding_sum": "embed_sum",
r"encoder\.model": "encoder.layers",
r"decoder\.model": "decoder.layers",
r"encoder_transformer\.transformer": "encoder_transformer",
r"decoder_transformer\.transformer": "decoder_transformer",
r"linear1": "mlp.fc1",
r"linear2": "mlp.fc2",
r"self_attn\.out_proj": "self_attn.o_proj",
r"norm1": "input_layernorm",
r"norm2": "post_attention_layernorm",
r"layer_scale_1": "self_attn_layer_scale",
r"layer_scale_2": "mlp_layer_scale",
}
# fmt: on
def permute_for_rope(input_tensor, n_heads, dim1, dim2):
"""
When going from the complex RoPE formulation to the sin/cos one, the query and key weights need to be
permuted (to avoid doing the permutation on the fly).
"""
return input_tensor.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
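# A minimal sketch of the permutation above on an illustrative 4-head projection with hidden size 32:
#
#     import torch
#
#     n_heads, dim1, dim2 = 4, 32, 32
#     weight = torch.randn(dim1, dim2)
#     permuted = weight.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
#     print(permuted.shape)  # torch.Size([32, 32]); rows are reordered per head into the sin/cos RoPE layout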
def convert_key(key, mapping):
for pattern, replacement in mapping.items():
key = re.sub(pattern, replacement, key)
return key
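# A minimal sketch of `convert_key` on one original Moshi key, using the mapping defined above:
#
#     key = "transformer.layers.0.gating.linear_in.weight"
#     print(convert_key(key, MOSHI_ORIGINAL_TO_CONVERTED_KEY_MAPPING))
#     # transformer.layers.0.mlp.fc1.weight  (the "transformer." prefix is stripped separately below)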
def convert_kyutai_speech_to_text_state_dict(state_dict, config, unwanted_prefix="transformer."):
hidden_size = config.hidden_size
head_dim = config.head_dim
num_heads = int(config.hidden_size // config.head_dim)
num_key_value_heads = config.num_key_value_heads
key_value_head_dim = config.num_key_value_heads * head_dim
# concat embeddings
embed_tokens_weight = []
for i in range(32):
embed_tokens_weight.append(state_dict.pop(f"emb.{i}.weight"))
embed_tokens_weight = torch.cat(embed_tokens_weight, dim=0)
embed_tokens_weight = torch.cat([state_dict.pop("text_emb.weight"), embed_tokens_weight])
embed_tokens_weight = torch.cat([embed_tokens_weight, torch.zeros(1, config.hidden_size)], dim=0)
state_dict["embed_tokens.embed_tokens.weight"] = embed_tokens_weight
for key, value in list(state_dict.items()):
if unwanted_prefix is not None and unwanted_prefix in key:
new_key = key[len(unwanted_prefix) :]
else:
new_key = key
new_key = convert_key(new_key, MOSHI_ORIGINAL_TO_CONVERTED_KEY_MAPPING)
# Post-process the current parameter.
if "alpha" in key:
state_dict[key] = state_dict[key].squeeze()
if "in_proj_weight" in new_key:
# split qkv into query key and value
mixed_qkv = state_dict.pop(key)
qkv_dim = mixed_qkv.size(0) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
state_dict[new_key.replace("in_proj_weight", "q_proj.linear.weight")] = permute_for_rope(
query_layer, num_heads, hidden_size, hidden_size
)
state_dict[new_key.replace("in_proj_weight", "k_proj.linear.weight")] = permute_for_rope(
key_layer, num_key_value_heads, key_value_head_dim, hidden_size
)
state_dict[new_key.replace("in_proj_weight", "v_proj.linear.weight")] = value_layer
else:
state_dict[new_key] = state_dict.pop(key)
return state_dict
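# A minimal sketch of the fused-QKV split performed above, assuming equally sized query/key/value
# projections with an illustrative hidden size of 8:
#
#     import torch
#
#     mixed_qkv = torch.randn(3 * 8, 8)  # stacked [q; k; v] projection weight
#     qkv_dim = mixed_qkv.size(0) // 3
#     q, k, v = mixed_qkv[:qkv_dim], mixed_qkv[qkv_dim : 2 * qkv_dim], mixed_qkv[2 * qkv_dim :]
#     print(q.shape, k.shape, v.shape)   # torch.Size([8, 8]) for each of the three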
def convert_mimi_state_dict(state_dict, config, unwanted_prefix=None):
hidden_size = config.hidden_size
head_dim = config.head_dim
num_heads = int(config.hidden_size // config.head_dim)
num_key_value_heads = config.num_key_value_heads
key_value_head_dim = config.num_key_value_heads * head_dim
for key, value in list(state_dict.items()):
if unwanted_prefix is not None and unwanted_prefix in key:
new_key = key[len(unwanted_prefix) :]
else:
new_key = key
new_key = convert_key(new_key, MIMI_ORIGINAL_TO_CONVERTED_KEY_MAPPING)
if "in_proj_weight" in new_key:
# split qkv into query key and value
mixed_qkv = state_dict.pop(key)
qkv_dim = mixed_qkv.size(0) // 3
query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]
state_dict[new_key.replace("in_proj_weight", "q_proj.weight")] = permute_for_rope(
query_layer, num_heads, hidden_size, hidden_size
)
state_dict[new_key.replace("in_proj_weight", "k_proj.weight")] = permute_for_rope(
key_layer, num_key_value_heads, key_value_head_dim, hidden_size
)
state_dict[new_key.replace("in_proj_weight", "v_proj.weight")] = value_layer
else:
state_dict[new_key] = state_dict.pop(key)
return state_dict
def write_model(
input_path_or_repo,
model_name,
codec_model_path_or_repo,
codec_model_name,
output_dir,
safe_serialization=True,
unwanted_prefix="transformer.",
):
print("Converting the model.")
os.makedirs(output_dir, exist_ok=True)
config = KyutaiSpeechToTextConfig(
vocab_size=8001,
max_position_embeddings=375,
num_hidden_layers=16,
num_attention_heads=16,
num_key_value_heads=16,
head_dim=128,
)
config.use_cache = True
config.codec_config.sliding_window = 250
model_path = cached_file(
input_path_or_repo,
model_name,
)
codec_path = cached_file(
codec_model_path_or_repo,
codec_model_name,
)
print(f"Fetching all parameters from the checkpoint at {model_path}...")
state_dict = safetensors.torch.load_file(model_path)
print(f"Fetching all parameters from the checkpoint at {codec_path}...")
codec_state_dict = safetensors.torch.load_file(codec_path)
print("Converting model...")
# -----------------------
# convert parameter names
# -----------------------
state_dict = convert_kyutai_speech_to_text_state_dict(state_dict, config, unwanted_prefix=unwanted_prefix)
codec_state_dict = convert_mimi_state_dict(codec_state_dict, config.codec_config, unwanted_prefix=None)
# -------------------------
# load the weights and save
# -------------------------
print("Loading the checkpoint in a Moshi ASR model.")
with torch.device("meta"):
model = KyutaiSpeechToTextForConditionalGeneration(config)
linear_weight = state_dict.pop("text_linear.weight")
model.model.load_state_dict(state_dict, strict=True, assign=True)
linear_weight = torch.cat([linear_weight, torch.zeros(1, config.hidden_size)])
model.lm_head.load_state_dict({"weight": linear_weight}, strict=True, assign=True)
model.codec_model.load_state_dict(codec_state_dict, strict=True, assign=True)
print("Checkpoint loaded successfully.")
del model.config._name_or_path
del model.config.codec_config._name_or_path
# default generation config
model.generation_config._from_model_config = False
model.generation_config.audio_window_size = 1
model.generation_config.cache_implementation = "sliding_window"
model.codec_model.generation_config._from_model_config = False
model.codec_model.generation_config.cache_implementation = "sliding_window"
model.codec_model.generation_config.use_cache = True
print("Saving the model.")
model.save_pretrained(output_dir, safe_serialization=safe_serialization)
del state_dict, model
# Safety check: reload the converted model
gc.collect()
print("Reloading the model to check if it's saved correctly.")
KyutaiSpeechToTextForConditionalGeneration.from_pretrained(output_dir, dtype=torch.bfloat16, device_map="auto")
print("Model reloaded successfully.")
def write_processor(
input_path_or_repo,
tokenizer_model_name,
codec_model_path_or_repo,
output_dir,
audio_delay_seconds,
audio_silence_prefix_seconds,
):
tokenizer_path = cached_file(
input_path_or_repo,
tokenizer_model_name,
)
tokenizer = MoshiConverter(tokenizer_path).converted()
original_tokenizer = sentencepiece.SentencePieceProcessor(tokenizer_path)
tokenizer = PreTrainedTokenizerFast(
tokenizer_object=tokenizer,
chat_template=None,
unk_token="<unk>",
model_input_names=["input_ids", "attention_mask"],
clean_up_tokenization_spaces=False,
bos_token_id=original_tokenizer.bos_id(),
eos_token_id=original_tokenizer.eos_id(),
pad_token_id=original_tokenizer.pad_id(),
)
feature_extractor = KyutaiSpeechToTextFeatureExtractor(
audio_delay_seconds=audio_delay_seconds,
audio_silence_prefix_seconds=audio_silence_prefix_seconds,
)
processor = KyutaiSpeechToTextProcessor(feature_extractor, tokenizer)
processor.save_pretrained(output_dir)
print(f"Processor saved successfully to {output_dir}")
def main():
parser = argparse.ArgumentParser(description="Convert Moshi ASR weights to HuggingFace format")
parser.add_argument(
"--input_path_or_repo",
type=str,
required=True,
help="Path or repo containing Moshi ASR weights",
)
parser.add_argument(
"--model_name",
type=str,
required=True,
help="Name of the model in input_path_or_repo",
)
parser.add_argument(
"--tokenizer_model_name",
type=str,
required=True,
help="Name of the tokenizer model in input_path_or_repo",
)
parser.add_argument(
"--codec_model_path_or_repo",
type=str,
required=True,
help="Path or repo containing the Mimi weights",
)
parser.add_argument(
"--mimi_name",
type=str,
required=True,
help="Name of the Mimi model in codec_model_path_or_repo",
)
parser.add_argument(
"--preprocessor_model_path_or_repo",
type=str,
required=True,
help="Path or repo containing the preprocessor config",
)
parser.add_argument(
"--output_dir",
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--safe_serialization", action="store_true", default=True, help="Whether or not to save using `safetensors`."
)
parser.add_argument(
"--audio_delay_seconds",
type=float,
required=True,
help="Audio delay in seconds to add to the right of the input",
)
parser.add_argument(
"--audio_silence_prefix_seconds",
type=float,
required=True,
help="Audio silence prefix in seconds to add to the left of the input",
)
args = parser.parse_args()
write_model(
args.input_path_or_repo,
args.model_name,
args.codec_model_path_or_repo,
args.mimi_name,
args.output_dir,
safe_serialization=args.safe_serialization,
)
write_processor(
args.input_path_or_repo,
args.tokenizer_model_name,
args.preprocessor_model_path_or_repo,
args.output_dir,
args.audio_delay_seconds,
args.audio_silence_prefix_seconds,
)
if __name__ == "__main__":
main()
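# A hypothetical invocation of this conversion script; every path, repo id and file name below is a
# placeholder, only the argument names come from the parser above:
#
#     python convert_kyutai_speech_to_text_to_hf.py \
#         --input_path_or_repo <asr_repo_or_path> --model_name model.safetensors \
#         --tokenizer_model_name tokenizer.model \
#         --codec_model_path_or_repo <mimi_repo_or_path> --mimi_name mimi.safetensors \
#         --preprocessor_model_path_or_repo <preprocessor_repo_or_path> \
#         --output_dir ./converted --audio_delay_seconds 2.0 --audio_silence_prefix_seconds 1.0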
| transformers/src/transformers/models/kyutai_speech_to_text/convert_kyutai_speech_to_text_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/kyutai_speech_to_text/convert_kyutai_speech_to_text_to_hf.py",
"repo_id": "transformers",
"token_count": 5830
} | 480 |
# coding=utf-8
# Copyright 2021 Microsoft Research The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch LayoutLMv2 model."""
import math
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPooling,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward
from ...utils import auto_docstring, is_detectron2_available, logging, requires_backends
from .configuration_layoutlmv2 import LayoutLMv2Config
# soft dependency
if is_detectron2_available():
import detectron2
from detectron2.modeling import META_ARCH_REGISTRY
# This is needed as otherwise their overload will break sequential loading by overwriting buffer over and over. See
# https://github.com/facebookresearch/detectron2/blob/9604f5995cc628619f0e4fd913453b4d7d61db3f/detectron2/layers/batch_norm.py#L83-L86
detectron2.layers.batch_norm.FrozenBatchNorm2d._load_from_state_dict = torch.nn.Module._load_from_state_dict
logger = logging.get_logger(__name__)
class LayoutLMv2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def _calc_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The `bbox` coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
dim=-1,
)
return spatial_position_embeddings
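# A minimal sketch of the concatenated spatial embedding width, assuming the usual LayoutLMv2 sizes
# (coordinate_size=128, shape_size=128, hidden_size=768):
#
#     coordinate_size, shape_size = 128, 128
#     spatial_dim = 4 * coordinate_size + 2 * shape_size  # x0, y0, x1, y1 boxes plus height and width
#     print(spatial_dim)  # 768, matching the hidden size so it can be summed with the other embeddings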
class LayoutLMv2SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
batch_size, seq_length, _ = hidden_states.shape
query, key, value = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = query.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
key_layer = key.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
value_layer = value.view(batch_size, -1, self.num_attention_heads, self.attention_head_size).transpose(1, 2)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(
attention_mask.to(torch.bool), torch.finfo(attention_scores.dtype).min
)
attention_probs = nn.functional.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutLMv2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class LayoutLMv2SelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->LayoutLMv2
class LayoutLMv2Intermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->LayoutLM
class LayoutLMv2Output(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class LayoutLMv2Layer(GradientCheckpointingLayer):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self_attention_outputs[0]
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small
absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions
>=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should
allow for more graceful generalization to longer sequences than the model has been trained on.
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
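# A minimal sketch of the bucketing above with the default 32 buckets and max_distance=128:
#
#     import torch
#
#     rel = torch.tensor([-130, -3, 0, 3, 130])
#     print(relative_position_bucket(rel, bidirectional=True, num_buckets=32, max_distance=128))
#     # tensor([15,  3,  0, 19, 31]) -- small offsets use exact buckets, large offsets logarithmic buckets,
#     # and positive offsets are shifted into the upper half of the bucket range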
class LayoutLMv2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_bias = nn.Linear(self.rel_pos_bins, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_bins, config.num_attention_heads, bias=False)
self.gradient_checkpointing = False
def _calculate_1d_position_embeddings(self, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos,
)
# Since this is a simple indexing operation that is independent of the input,
# no need to track gradients for this operation
#
# Without this no_grad context, training speed slows down significantly
with torch.no_grad():
rel_pos = self.rel_pos_bias.weight.t()[rel_pos].permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _calculate_2d_position_embeddings(self, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
# Since this is a simple indexing operation that is independent of the input,
# no need to track gradients for this operation
#
# Without this no_grad context, training speed slows down significantly
with torch.no_grad():
rel_pos_x = self.rel_pos_x_bias.weight.t()[rel_pos_x].permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias.weight.t()[rel_pos_y].permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
rel_pos = self._calculate_1d_position_embeddings(position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._calculate_2d_position_embeddings(bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
all_hidden_states,
all_self_attentions,
]
if v is not None
)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
class LayoutLMv2PreTrainedModel(PreTrainedModel):
config: LayoutLMv2Config
base_model_prefix = "layoutlmv2"
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, LayoutLMv2SelfAttention):
if self.config.fast_qkv:
module.q_bias.data.zero_()
module.v_bias.data.zero_()
elif isinstance(module, LayoutLMv2Model):
if hasattr(module, "visual_segment_embedding"):
module.visual_segment_embedding.data.normal_(mean=0.0, std=self.config.initializer_range)
def my_convert_sync_batchnorm(module, process_group=None):
# same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm` but allowing converting from `detectron2.layers.FrozenBatchNorm2d`
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
module_output = module
if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
module_output = torch.nn.SyncBatchNorm(
num_features=module.num_features,
eps=module.eps,
affine=True,
track_running_stats=True,
process_group=process_group,
)
module_output.weight = torch.nn.Parameter(module.weight)
module_output.bias = torch.nn.Parameter(module.bias)
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
for name, child in module.named_children():
module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
del module
return module_output
class LayoutLMv2VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = config.get_detectron2_config()
meta_arch = self.cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
self.backbone = model.backbone
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer(
"pixel_mean",
torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
persistent=False,
)
self.register_buffer(
"pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1), persistent=False
)
self.out_feature_key = "p2"
if torch.are_deterministic_algorithms_enabled():
logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
input_shape = (224, 224)
backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
self.pool = nn.AvgPool2d(
(
math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
)
)
else:
self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
if len(config.image_feature_pool_shape) == 2:
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
def forward(self, images):
images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
return features
def synchronize_batch_norm(self):
if not (
torch.distributed.is_available()
and torch.distributed.is_initialized()
and torch.distributed.get_rank() > -1
):
raise RuntimeError("Make sure torch.distributed is set up properly.")
self_rank = torch.distributed.get_rank()
node_size = torch.cuda.device_count()
world_size = torch.distributed.get_world_size()
if not (world_size % node_size == 0):
raise RuntimeError("Make sure the number of processes can be divided by the number of nodes")
node_global_ranks = [list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)]
sync_bn_groups = [
torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
]
node_rank = self_rank // node_size
self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
class LayoutLMv2Pooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
@auto_docstring
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
def __init__(self, config):
requires_backends(self, "detectron2")
super().__init__(config)
self.config = config
self.has_visual_segment_embedding = config.has_visual_segment_embedding
self.embeddings = LayoutLMv2Embeddings(config)
self.visual = LayoutLMv2VisualBackbone(config)
self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
if self.has_visual_segment_embedding:
self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
self.encoder = LayoutLMv2Encoder(config)
self.pooler = LayoutLMv2Pooler(config)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if inputs_embeds is None:
inputs_embeds = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(self.visual(image))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._calc_spatial_position_embeddings(bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def _calc_visual_bbox(self, image_feature_pool_shape, bbox, device, final_shape):
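        # build one bounding box per visual token by dividing the 0-1000 normalized coordinate space
        # into an `image_feature_pool_shape` grid, then tile the resulting boxes over the batch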
visual_bbox_x = torch.div(
torch.arange(
0,
1000 * (image_feature_pool_shape[1] + 1),
1000,
device=device,
dtype=bbox.dtype,
),
self.config.image_feature_pool_shape[1],
rounding_mode="floor",
)
visual_bbox_y = torch.div(
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[0] + 1),
1000,
device=device,
dtype=bbox.dtype,
),
self.config.image_feature_pool_shape[0],
rounding_mode="floor",
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, bbox.size(-1))
visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
return visual_bbox
def _get_input_shape(self, input_ids=None, inputs_embeds=None):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
return input_ids.size()
elif inputs_embeds is not None:
return inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
r"""
        bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
Batch of document images.
Examples:
```python
>>> from transformers import AutoProcessor, LayoutLMv2Model, set_seed
>>> from PIL import Image
>>> import torch
>>> from datasets import load_dataset
>>> set_seed(0)
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> model = LayoutLMv2Model.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
>>> image = dataset["test"][0]["image"]
>>> encoding = processor(image, return_tensors="pt")
>>> outputs = model(**encoding)
>>> last_hidden_states = outputs.last_hidden_state
>>> last_hidden_states.shape
torch.Size([1, 342, 768])
```
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
input_shape = self._get_input_shape(input_ids, inputs_embeds)
device = input_ids.device if input_ids is not None else inputs_embeds.device
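        # the image is pooled to a fixed grid, so the visual part of the sequence always holds
        # pool_height * pool_width tokens appended after the text tokens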
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
# needs a new copy of input_shape for tracing. Otherwise wrong dimensions will occur
final_shape = list(self._get_input_shape(input_ids, inputs_embeds))
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
        # a default bbox must be created before it is used to build the visual bounding boxes
        if bbox is None:
            bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
        visual_bbox = self._calc_visual_bbox(self.config.image_feature_pool_shape, bbox, device, final_shape)
        final_bbox = torch.cat([bbox, visual_bbox], dim=1)
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        visual_attention_mask = torch.ones(visual_shape, device=device)
        final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        if position_ids is None:
            seq_length = input_shape[1]
            position_ids = self.embeddings.position_ids[:, :seq_length]
            position_ids = position_ids.expand(input_shape)
        visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
            input_shape[0], 1
        )
        final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
)
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
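        # turn the (batch, seq_len) mask into an additive bias of shape (batch, 1, 1, seq_len):
        # 0 for visible positions, a large negative value for masked ones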
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(self.dtype).min
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
LayoutLMv2 Model with a sequence classification head on top (a linear layer on top of the concatenation of the
final hidden state of the [CLS] token, average-pooled initial visual embeddings and average-pooled final visual
embeddings, e.g. for document image classification tasks such as the
[RVL-CDIP](https://www.cs.cmu.edu/~aharley/rvl-cdip/) dataset.
"""
)
class LayoutLMv2ForSequenceClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size * 3, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutput]:
r"""
input_ids (`torch.LongTensor` of shape `batch_size, sequence_length`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
Batch of document images.
token_type_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Example:
```python
>>> from transformers import AutoProcessor, LayoutLMv2ForSequenceClassification, set_seed
>>> from PIL import Image
>>> import torch
>>> from datasets import load_dataset
>>> set_seed(0)
>>> dataset = load_dataset("aharley/rvl_cdip", split="train", streaming=True)
>>> data = next(iter(dataset))
>>> image = data["image"].convert("RGB")
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> model = LayoutLMv2ForSequenceClassification.from_pretrained(
... "microsoft/layoutlmv2-base-uncased", num_labels=dataset.info.features["label"].num_classes
... )
>>> encoding = processor(image, return_tensors="pt")
>>> sequence_label = torch.tensor([data["label"]])
>>> outputs = model(**encoding, labels=sequence_label)
>>> loss, logits = outputs.loss, outputs.logits
>>> predicted_idx = logits.argmax(dim=-1).item()
>>> predicted_answer = dataset.info.features["label"].names[4]
>>> predicted_idx, predicted_answer # results are not good without further fine-tuning
(7, 'advertisement')
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
visual_bbox = self.layoutlmv2._calc_visual_bbox(
self.config.image_feature_pool_shape, bbox, device, final_shape
)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
initial_image_embeddings = self.layoutlmv2._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
sequence_output, final_image_embeddings = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
cls_final_output = sequence_output[:, 0, :]
# average-pool the visual embeddings
pooled_initial_image_embeddings = initial_image_embeddings.mean(dim=1)
pooled_final_image_embeddings = final_image_embeddings.mean(dim=1)
# concatenate with cls_final_output
sequence_output = torch.cat(
[cls_final_output, pooled_initial_image_embeddings, pooled_final_image_embeddings], dim=1
)
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
LayoutLMv2 Model with a token classification head on top (a linear layer on top of the text part of the hidden
states) e.g. for sequence labeling (information extraction) tasks such as
[FUNSD](https://guillaumejaume.github.io/FUNSD/), [SROIE](https://rrc.cvc.uab.es/?ch=13),
[CORD](https://github.com/clovaai/cord) and [Kleister-NDA](https://github.com/applicaai/kleister-nda).
"""
)
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, TokenClassifierOutput]:
r"""
input_ids (`torch.LongTensor` of shape `batch_size, sequence_length`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
Batch of document images.
token_type_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Example:
```python
>>> from transformers import AutoProcessor, LayoutLMv2ForTokenClassification, set_seed
>>> from PIL import Image
>>> from datasets import load_dataset
>>> set_seed(0)
>>> datasets = load_dataset("nielsr/funsd", split="test")
>>> labels = datasets.features["ner_tags"].feature.names
>>> id2label = {v: k for v, k in enumerate(labels)}
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased", revision="no_ocr")
>>> model = LayoutLMv2ForTokenClassification.from_pretrained(
... "microsoft/layoutlmv2-base-uncased", num_labels=len(labels)
... )
>>> data = datasets[0]
>>> image = Image.open(data["image_path"]).convert("RGB")
>>> words = data["words"]
>>> boxes = data["bboxes"] # make sure to normalize your bounding boxes
>>> word_labels = data["ner_tags"]
>>> encoding = processor(
... image,
... words,
... boxes=boxes,
... word_labels=word_labels,
... padding="max_length",
... truncation=True,
... return_tensors="pt",
... )
>>> outputs = model(**encoding)
>>> logits, loss = outputs.logits, outputs.loss
>>> predicted_token_class_ids = logits.argmax(-1)
>>> predicted_tokens_classes = [id2label[t.item()] for t in predicted_token_class_ids[0]]
>>> predicted_tokens_classes[:5] # results are not good without further fine-tuning
['I-HEADER', 'I-HEADER', 'I-QUESTION', 'I-HEADER', 'I-QUESTION']
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# only take the text part of the output representations
sequence_output = outputs[0][:, :seq_length]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring
class LayoutLMv2ForQuestionAnswering(LayoutLMv2PreTrainedModel):
def __init__(self, config, has_visual_segment_embedding=True):
r"""
has_visual_segment_embedding (`bool`, *optional*, defaults to `True`):
Whether or not to add visual segment embeddings.
"""
super().__init__(config)
self.num_labels = config.num_labels
config.has_visual_segment_embedding = has_visual_segment_embedding
self.layoutlmv2 = LayoutLMv2Model(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
bbox: Optional[torch.LongTensor] = None,
image: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `batch_size, sequence_length`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
bbox (`torch.LongTensor` of shape `(batch_size, sequence_length, 4)`, *optional*):
Bounding boxes of each input sequence tokens. Selected in the range `[0,
config.max_2d_position_embeddings-1]`. Each bounding box should be a normalized version in (x0, y0, x1, y1)
format, where (x0, y0) corresponds to the position of the upper left corner in the bounding box, and (x1,
y1) represents the position of the lower right corner.
image (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `detectron.structures.ImageList` whose `tensors` is of shape `(batch_size, num_channels, height, width)`):
Batch of document images.
token_type_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `batch_size, sequence_length`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
Example:
In this example below, we give the LayoutLMv2 model an image (of texts) and ask it a question. It will give us
a prediction of what it thinks the answer is (the span of the answer within the texts parsed from the image).
```python
>>> from transformers import AutoProcessor, LayoutLMv2ForQuestionAnswering, set_seed
>>> import torch
>>> from PIL import Image
>>> from datasets import load_dataset
>>> set_seed(0)
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> model = LayoutLMv2ForQuestionAnswering.from_pretrained("microsoft/layoutlmv2-base-uncased")
>>> dataset = load_dataset("hf-internal-testing/fixtures_docvqa")
>>> image = dataset["test"][0]["image"]
>>> question = "When is coffee break?"
>>> encoding = processor(image, question, return_tensors="pt")
>>> outputs = model(**encoding)
>>> predicted_start_idx = outputs.start_logits.argmax(-1).item()
>>> predicted_end_idx = outputs.end_logits.argmax(-1).item()
>>> predicted_start_idx, predicted_end_idx
(30, 191)
>>> predicted_answer_tokens = encoding.input_ids.squeeze()[predicted_start_idx : predicted_end_idx + 1]
>>> predicted_answer = processor.tokenizer.decode(predicted_answer_tokens)
>>> predicted_answer # results are not good without further fine-tuning
'44 a. m. to 12 : 25 p. m. 12 : 25 to 12 : 58 p. m. 12 : 58 to 4 : 00 p. m. 2 : 00 to 5 : 00 p. m. coffee break coffee will be served for men and women in the lobby adjacent to exhibit area. please move into exhibit area. ( exhibits open ) trrf general session ( part | ) presiding : lee a. waller trrf vice president “ introductory remarks ” lee a. waller, trrf vice presi - dent individual interviews with trrf public board members and sci - entific advisory council mem - bers conducted by trrf treasurer philip g. kuehn to get answers which the public refrigerated warehousing industry is looking for. plus questions from'
```
```python
>>> target_start_index = torch.tensor([7])
>>> target_end_index = torch.tensor([14])
>>> outputs = model(**encoding, start_positions=target_start_index, end_positions=target_end_index)
>>> predicted_answer_span_start = outputs.start_logits.argmax(-1).item()
>>> predicted_answer_span_end = outputs.end_logits.argmax(-1).item()
>>> predicted_answer_span_start, predicted_answer_span_end
(30, 191)
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
# only take the text part of the output representations
sequence_output = outputs[0][:, :seq_length]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
| transformers/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py/0 | {
"file_path": "transformers/src/transformers/models/layoutlmv2/modeling_layoutlmv2.py",
"repo_id": "transformers",
"token_count": 27043
} | 481 |
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from ...configuration_utils import PretrainedConfig
class Lfm2Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Lfm2Model`]. It is used to instantiate a LFM2
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the LFM2-1.2B model.
e.g. [LiquidAI/LFM2-1.2B](https://huggingface.co/LiquidAI/LFM2-1.2B)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 65536):
            Vocabulary size of the LFM2 model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`Lfm2Model`].
hidden_size (`int`, *optional*, defaults to 2560):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 12288):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer decoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer decoder.
num_key_value_heads (`int`, *optional*, defaults to 8):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
`num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
max_position_embeddings (`int`, *optional*, defaults to 128000):
            The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
norm_eps (`float`, *optional*, defaults to 1e-05):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
pad_token_id (`int`, *optional*, defaults to 0):
Padding token id.
bos_token_id (`int`, *optional*, defaults to 1):
Beginning of stream token id.
eos_token_id (`int`, *optional*, defaults to 2):
End of stream token id.
tie_word_embeddings (`bool`, *optional*, defaults to `True`):
Whether to tie weight embeddings
rope_theta (`float`, *optional*, defaults to 1000000.0):
The base period of the RoPE embeddings.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether to use bias in the conv layers.
conv_L_cache (`int`, *optional*, defaults to 3):
L_cache dim in the conv layers.
block_multiple_of (`int`, *optional*, defaults to 256):
Multiple for the `intermediate_size`.
block_ffn_dim_multiplier (`float`, *optional*, defaults to 1.0):
Multiplier for the `intermediate_size`.
block_auto_adjust_ff_dim (`bool`, *optional*, defaults to `True`):
Whether to adjust the dim of the `intermediate_size`.
        full_attn_idxs (`list[int]`, *optional*):
            Indices of the layers that use full attention. Ignored when `layer_types` is provided.
        layer_types (`list[str]`, *optional*):
            Type of each layer (either `"full_attention"` or `"conv"`).
```python
>>> from transformers import Lfm2Model, Lfm2Config
>>> # Initializing a LFM2 model
>>> configuration = Lfm2Config()
>>> # Initializing a model from the LFM2-1.2B style configuration
>>> model = Lfm2Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "lfm2"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size: int = 65536,
hidden_size: int = 2560,
intermediate_size: int = 12288,
num_hidden_layers: int = 32,
num_attention_heads: int = 32,
num_key_value_heads: int = 8,
max_position_embeddings: int = 128_000,
initializer_range: float = 0.02,
norm_eps: float = 0.00001,
use_cache: bool = True,
pad_token_id: int = 0,
bos_token_id: int = 1,
eos_token_id: int = 2,
tie_word_embeddings: bool = True,
rope_theta: float = 1000000.0,
conv_bias: bool = False,
conv_L_cache: int = 3,
block_multiple_of: int = 256,
block_ffn_dim_multiplier: float = 1.0,
block_auto_adjust_ff_dim: bool = True,
full_attn_idxs: Optional[list[int]] = None,
layer_types: Optional[list[str]] = None,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.rope_theta = kwargs.get("theta", rope_theta) # to fit original config keys
self.max_position_embeddings = max_position_embeddings
self.use_cache = use_cache
self.norm_eps = norm_eps
self.initializer_range = initializer_range
# attn operator config
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
# custom operator config
self.conv_bias = conv_bias
self.conv_L_cache = conv_L_cache
# MLP config
self.intermediate_size = kwargs.get("block_ff_dim", intermediate_size) # to fit original config keys
self.block_multiple_of = block_multiple_of
self.block_ffn_dim_multiplier = block_ffn_dim_multiplier
self.block_auto_adjust_ff_dim = block_auto_adjust_ff_dim
self.layer_types = layer_types
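        # if no explicit layer_types are given, layers listed in `full_attn_idxs` use full attention and the
        # remaining layers use the short convolution operator; with neither set, every layer uses full attention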
if self.layer_types is None:
full_attn_idxs = full_attn_idxs if full_attn_idxs is not None else list(range(num_hidden_layers))
self.layer_types = ["full_attention" if i in full_attn_idxs else "conv" for i in range(num_hidden_layers)]
tie_word_embeddings = kwargs.get("tie_embedding", tie_word_embeddings) # to fit original config keys
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
__all__ = ["Lfm2Config"]
| transformers/src/transformers/models/lfm2/configuration_lfm2.py/0 | {
"file_path": "transformers/src/transformers/models/lfm2/configuration_lfm2.py",
"repo_id": "transformers",
"token_count": 3088
} | 482 |
# coding=utf-8
# Copyright 2024 HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...utils import (
logging,
)
from ..auto import CONFIG_MAPPING, AutoConfig
logger = logging.get_logger(__name__)
class LlavaOnevisionConfig(PretrainedConfig):
r"""
    This is the configuration class to store the configuration of a [`LlavaOnevisionForConditionalGeneration`]. It is used to instantiate a
    Llava-OneVision model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the [llava-hf/llava-onevision-qwen2-7b-ov-hf](https://huggingface.co/llava-hf/llava-onevision-qwen2-7b-ov-hf)
model.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `SiglipVisionConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
The config object or dictionary of the text backbone.
image_token_index (`int`, *optional*, defaults to 151646):
The image token index to encode the image prompt.
video_token_index (`int`, *optional*, defaults to 151647):
The video token index to encode the video prompt.
projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
The activation function used by the multimodal projector.
vision_feature_select_strategy (`str`, *optional*, defaults to `"full"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`. If `"default"`, the CLS token is removed from the vision features.
If `"full"`, the full vision features are used.
vision_feature_layer (`Union[int, list[int]]`, *optional*, defaults to -1):
The index of the layer to select the vision feature. If multiple indices are provided,
the vision feature of the corresponding indices will be concatenated to form the
vision features.
vision_aspect_ratio (`str`, *optional*, defaults to `"anyres_max_9"`):
            Aspect ratio used when processing image features. The default value is "anyres_max_9".
image_grid_pinpoints (`List`, *optional*):
A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
of the form `(height, width)`.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether the model's input and output word embeddings should be tied.
multimodal_projector_bias (`bool`, *optional*, defaults to `True`):
Whether to use bias in the multimodal projector.
Example:
```python
>>> from transformers import LlavaOnevisionForConditionalGeneration, LlavaOnevisionConfig, SiglipVisionConfig, Qwen2Config
    >>> # Initializing a Siglip vision config
    >>> vision_config = SiglipVisionConfig()
    >>> # Initializing a Qwen2 text config
    >>> text_config = Qwen2Config()
    >>> # Initializing a Llava-OneVision llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
>>> configuration = LlavaOnevisionConfig(vision_config, text_config)
>>> # Initializing a model from the llava-hf/llava-onevision-qwen2-7b-ov-hf style configuration
>>> model = LlavaOnevisionForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "llava_onevision"
attribute_map = {
"image_token_id": "image_token_index",
"video_token_id": "video_token_index",
}
sub_configs = {"text_config": AutoConfig, "vision_config": AutoConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_index=151646,
video_token_index=151647,
projector_hidden_act="gelu",
vision_feature_select_strategy="full",
vision_feature_layer=-1,
vision_aspect_ratio="anyres_max_9",
image_grid_pinpoints=None,
tie_word_embeddings=False,
multimodal_projector_bias=True,
**kwargs,
):
self.image_token_index = image_token_index
self.video_token_index = video_token_index
self.projector_hidden_act = projector_hidden_act
self.multimodal_projector_bias = multimodal_projector_bias
if vision_feature_select_strategy not in ["default", "full"]:
raise ValueError(
"vision_feature_select_strategy should be one of 'default', 'full'."
f"Got: {vision_feature_select_strategy}"
)
self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer
self.vision_aspect_ratio = vision_aspect_ratio
image_grid_pinpoints = (
image_grid_pinpoints
if image_grid_pinpoints is not None
else [
[384, 384],
[384, 768],
[384, 1152],
[384, 1536],
[384, 1920],
[384, 2304],
[768, 384],
[768, 768],
[768, 1152],
[768, 1536],
[768, 1920],
[768, 2304],
[1152, 384],
[1152, 768],
[1152, 1152],
[1152, 1536],
[1152, 1920],
[1152, 2304],
[1536, 384],
[1536, 768],
[1536, 1152],
[1536, 1536],
[1536, 1920],
[1536, 2304],
[1920, 384],
[1920, 768],
[1920, 1152],
[1920, 1536],
[1920, 1920],
[1920, 2304],
[2304, 384],
[2304, 768],
[2304, 1152],
[2304, 1536],
[2304, 1920],
[2304, 2304],
]
)
self.image_grid_pinpoints = image_grid_pinpoints
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "siglip_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
elif vision_config is None:
vision_config = CONFIG_MAPPING["siglip_vision_model"](
hidden_size=1152,
intermediate_size=4304,
patch_size=14,
image_size=384,
num_hidden_layers=26,
num_attention_heads=16,
vision_use_head=False,
)
self.vision_config = vision_config
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "qwen2")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["qwen2"]()
self.text_config = text_config
super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
__all__ = ["LlavaOnevisionConfig"]
| transformers/src/transformers/models/llava_onevision/configuration_llava_onevision.py/0 | {
"file_path": "transformers/src/transformers/models/llava_onevision/configuration_llava_onevision.py",
"repo_id": "transformers",
"token_count": 3480
} | 483 |
# coding=utf-8
# Copyright 2022, The LongT5 Authors and HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""LongT5 model configuration"""
from collections.abc import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
class LongT5Config(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
[google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Arguments:
vocab_size (`int`, *optional*, defaults to 32128):
Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`LongT5Model`].
d_model (`int`, *optional*, defaults to 512):
Size of the encoder layers and the pooler layer.
d_kv (`int`, *optional*, defaults to 64):
Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
num_heads`.
d_ff (`int`, *optional*, defaults to 2048):
Size of the intermediate feed forward layer in each `LongT5Block`.
num_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
num_decoder_layers (`int`, *optional*):
Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
num_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
        local_radius (`int`, *optional*, defaults to 127):
            Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.
        global_block_size (`int`, *optional*, defaults to 16):
Length of blocks an input sequence is divided into for a global token representation. Used only for
`encoder_attention_type = "transient-global"`.
relative_attention_num_buckets (`int`, *optional*, defaults to 32):
The number of buckets to use for each attention layer.
relative_attention_max_distance (`int`, *optional*, defaults to 128):
The maximum distance of the longer sequences for the bucket separation.
dropout_rate (`float`, *optional*, defaults to 0.1):
The ratio for all dropout layers.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
The epsilon used by the layer normalization layers.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
`"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`.
encoder_attention_type (`string`, *optional*, defaults to `"local"`):
Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are
supported by LongT5 implementation.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
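    Example (a minimal initialization sketch, following the same pattern as the other configuration classes):
    ```python
    >>> from transformers import LongT5Model, LongT5Config
    >>> # Initializing a LongT5 google/long-t5-local-base style configuration
    >>> configuration = LongT5Config()
    >>> # Initializing a model from that configuration
    >>> model = LongT5Model(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```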
"""
model_type = "longt5"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
"head_dim": "d_kv",
}
def __init__(
self,
vocab_size=32128,
d_model=512,
d_kv=64,
d_ff=2048,
num_layers=6,
num_decoder_layers=None,
num_heads=8,
local_radius=127,
global_block_size=16,
relative_attention_num_buckets=32,
relative_attention_max_distance=128,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="relu",
is_encoder_decoder=True,
encoder_attention_type="local",
use_cache=True,
pad_token_id=0,
eos_token_id=1,
**kwargs,
):
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
# default = symmetry
self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
self.num_heads = num_heads
self.local_radius = local_radius
self.global_block_size = global_block_size
self.relative_attention_num_buckets = relative_attention_num_buckets
self.relative_attention_max_distance = relative_attention_max_distance
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.encoder_attention_type = encoder_attention_type
self.use_cache = use_cache
act_info = self.feed_forward_proj.split("-")
self.dense_act_fn = act_info[-1]
self.is_gated_act = act_info[0] == "gated"
if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'"
)
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
self.dense_act_fn = "gelu_new"
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
**kwargs,
)
class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
common_inputs = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
common_inputs["decoder_input_ids"] = {0: "batch"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction="inputs")
return common_inputs
@property
def default_onnx_opset(self) -> int:
return 13
__all__ = ["LongT5Config", "LongT5OnnxConfig"]
| transformers/src/transformers/models/longt5/configuration_longt5.py/0 | {
"file_path": "transformers/src/transformers/models/longt5/configuration_longt5.py",
"repo_id": "transformers",
"token_count": 3334
} | 484 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import socket
import time
import warnings
from pathlib import Path
from typing import Union
from zipfile import ZipFile
import numpy as np
import torch
from huggingface_hub.hf_api import list_models
from torch import nn
from tqdm import tqdm
from transformers import MarianConfig, MarianMTModel, MarianTokenizer
def remove_suffix(text: str, suffix: str):
if text.endswith(suffix):
return text[: -len(suffix)]
return text # or whatever
def remove_prefix(text: str, prefix: str):
if text.startswith(prefix):
return text[len(prefix) :]
return text # or whatever
def convert_encoder_layer(opus_dict, layer_prefix: str, converter: dict):
sd = {}
for k in opus_dict:
if not k.startswith(layer_prefix):
continue
stripped = remove_prefix(k, layer_prefix)
v = opus_dict[k].T # besides embeddings, everything must be transposed.
sd[converter[stripped]] = torch.tensor(v).squeeze()
return sd
def load_layers_(layer_lst: nn.ModuleList, opus_state: dict, converter, is_decoder=False):
for i, layer in enumerate(layer_lst):
layer_tag = f"decoder_l{i + 1}_" if is_decoder else f"encoder_l{i + 1}_"
sd = convert_encoder_layer(opus_state, layer_tag, converter)
layer.load_state_dict(sd, strict=False)
def find_pretrained_model(src_lang: str, tgt_lang: str) -> list[str]:
"""Find models that can accept src_lang as input and return tgt_lang as output."""
prefix = "Helsinki-NLP/opus-mt-"
model_list = list_models()
model_ids = [x.id for x in model_list if x.id.startswith("Helsinki-NLP")]
src_and_targ = [
remove_prefix(m, prefix).lower().split("-") for m in model_ids if "+" not in m
] # + can't be loaded.
matching = [f"{prefix}{a}-{b}" for (a, b) in src_and_targ if src_lang in a and tgt_lang in b]
return matching
def add_emb_entries(wemb, final_bias, n_special_tokens=1):
vsize, d_model = wemb.shape
embs_to_add = np.zeros((n_special_tokens, d_model))
new_embs = np.concatenate([wemb, embs_to_add])
bias_to_add = np.zeros((n_special_tokens, 1))
new_bias = np.concatenate((final_bias, bias_to_add), axis=1)
return new_embs, new_bias
def _cast_yaml_str(v):
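    # Marian YAML configs are parsed with the BaseLoader, so every value arrives as a string;
    # coerce "true"/"false" to booleans and numeric strings to ints where possible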
bool_dct = {"true": True, "false": False}
if not isinstance(v, str):
return v
elif v in bool_dct:
return bool_dct[v]
try:
return int(v)
except (TypeError, ValueError):
return v
def cast_marian_config(raw_cfg: dict[str, str]) -> dict:
return {k: _cast_yaml_str(v) for k, v in raw_cfg.items()}
CONFIG_KEY = "special:model.yml"
def load_config_from_state_dict(opus_dict):
import yaml
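    # the Marian npz checkpoint stores its YAML configuration as an array of character codes under
    # the "special:model.yml" key; decode it (dropping the trailing byte) before parsing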
cfg_str = "".join([chr(x) for x in opus_dict[CONFIG_KEY]])
yaml_cfg = yaml.load(cfg_str[:-1], Loader=yaml.BaseLoader)
return cast_marian_config(yaml_cfg)
def find_model_file(dest_dir): # this one better
model_files = list(Path(dest_dir).glob("*.npz"))
    if len(model_files) != 1:
        raise ValueError(f"Expected exactly one model file, found: {model_files}")
model_file = model_files[0]
return model_file
# Group Names Logic: change long opus model names to something shorter, like opus-mt-en-ROMANCE
ROM_GROUP = (
"fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO+es_EC+es_ES+es_GT"
"+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR+pt_PT+gl+lad+an+mwl+it+it_IT+co"
"+nap+scn+vec+sc+ro+la"
)
GROUPS = [
("cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "ZH"),
(ROM_GROUP, "ROMANCE"),
("de+nl+fy+af+da+fo+is+no+nb+nn+sv", "NORTH_EU"),
("da+fo+is+no+nb+nn+sv", "SCANDINAVIA"),
("se+sma+smj+smn+sms", "SAMI"),
("nb_NO+nb+nn_NO+nn+nog+no_nb+no", "NORWAY"),
("ga+cy+br+gd+kw+gv", "CELTIC"), # https://en.wikipedia.org/wiki/Insular_Celtic_languages
]
GROUP_TO_OPUS_NAME = {
"opus-mt-ZH-de": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-de",
"opus-mt-ZH-fi": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi",
"opus-mt-ZH-sv": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-sv",
"opus-mt-SCANDINAVIA-SCANDINAVIA": "da+fo+is+no+nb+nn+sv-da+fo+is+no+nb+nn+sv",
"opus-mt-NORTH_EU-NORTH_EU": "de+nl+fy+af+da+fo+is+no+nb+nn+sv-de+nl+fy+af+da+fo+is+no+nb+nn+sv",
"opus-mt-de-ZH": "de-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
"opus-mt-en_el_es_fi-en_el_es_fi": "en+el+es+fi-en+el+es+fi",
"opus-mt-en-ROMANCE": (
"en-fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO"
"+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR"
"+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la"
),
"opus-mt-en-CELTIC": "en-ga+cy+br+gd+kw+gv",
"opus-mt-es-NORWAY": "es-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
"opus-mt-fi_nb_no_nn_ru_sv_en-SAMI": "fi+nb+no+nn+ru+sv+en-se+sma+smj+smn+sms",
"opus-mt-fi-ZH": "fi-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
"opus-mt-fi-NORWAY": "fi-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
"opus-mt-ROMANCE-en": (
"fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO"
"+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR"
"+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la-en"
),
"opus-mt-CELTIC-en": "ga+cy+br+gd+kw+gv-en",
"opus-mt-sv-ZH": "sv-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh",
"opus-mt-sv-NORWAY": "sv-nb_NO+nb+nn_NO+nn+nog+no_nb+no",
}
OPUS_GITHUB_URL = "https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/"
ORG_NAME = "Helsinki-NLP/"
def convert_opus_name_to_hf_name(x):
"""For OPUS-MT-Train/ DEPRECATED"""
for substr, grp_name in GROUPS:
x = x.replace(substr, grp_name)
return x.replace("+", "_")
def convert_hf_name_to_opus_name(hf_model_name):
"""
Relies on the assumption that there are no language codes like pt_br in models that are not in GROUP_TO_OPUS_NAME.
"""
hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
if hf_model_name in GROUP_TO_OPUS_NAME:
opus_w_prefix = GROUP_TO_OPUS_NAME[hf_model_name]
else:
opus_w_prefix = hf_model_name.replace("_", "+")
return remove_prefix(opus_w_prefix, "opus-mt-")
def get_system_metadata(repo_root):
import git
return {
"helsinki_git_sha": git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha,
"transformers_git_sha": git.Repo(path=".", search_parent_directories=True).head.object.hexsha,
"port_machine": socket.gethostname(),
"port_time": time.strftime("%Y-%m-%d-%H:%M"),
}
# docstyle-ignore
FRONT_MATTER_TEMPLATE = """---
language:
{}
tags:
- translation
license: apache-2.0
---
"""
DEFAULT_REPO = "Tatoeba-Challenge"
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models")
def write_model_card(
hf_model_name: str,
repo_root=DEFAULT_REPO,
save_dir=Path("marian_converted"),
dry_run=False,
extra_metadata={},
) -> str:
"""
    Copy the most recent model's readme section from OPUS and add metadata. Upload command: aws s3 sync model_card_dir
    s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
"""
import pandas as pd
hf_model_name = remove_prefix(hf_model_name, ORG_NAME)
opus_name: str = convert_hf_name_to_opus_name(hf_model_name)
if repo_root not in ("OPUS-MT-train", "Tatoeba-Challenge"):
raise ValueError(f"Repos root is {repo_root}. Expected either OPUS-MT-train or Tatoeba-Challenge")
opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
if not (opus_readme_path.exists()):
raise ValueError(f"Readme file {opus_readme_path} not found")
opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]
readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"
s, t = ",".join(opus_src), ",".join(opus_tgt)
metadata = {
"hf_name": hf_model_name,
"source_languages": s,
"target_languages": t,
"opus_readme_url": readme_url,
"original_repo": repo_root,
"tags": ["translation"],
}
metadata.update(extra_metadata)
metadata.update(get_system_metadata(repo_root))
# combine with opus markdown
extra_markdown = (
f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: "
f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n"
)
content = opus_readme_path.open().read()
content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model.
splat = content.split("*")[2:]
print(splat[3])
content = "*".join(splat)
content = (
FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"])
+ extra_markdown
+ "\n* "
+ content.replace("download", "download original weights")
)
items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()])
sec3 = "\n### System Info: \n" + items
content += sec3
if dry_run:
return content, metadata
sub_dir = save_dir / f"opus-mt-{hf_model_name}"
sub_dir.mkdir(exist_ok=True)
dest = sub_dir / "README.md"
dest.open("w").write(content)
pd.Series(metadata).to_json(sub_dir / "metadata.json")
# if dry_run:
return content, metadata
def make_registry(repo_path="Opus-MT-train/models"):
if not (Path(repo_path) / "fr-en" / "README.md").exists():
raise ValueError(
f"repo_path:{repo_path} does not exist: "
"You must run: git clone git@github.com:Helsinki-NLP/Opus-MT-train.git before calling."
)
results = {}
for p in Path(repo_path).iterdir():
n_dash = p.name.count("-")
if n_dash == 0:
continue
else:
lns = list(open(p / "README.md").readlines())
results[p.name] = _parse_readme(lns)
return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()]
def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path("marian_converted")):
"""Requires 300GB"""
save_dir = Path("marian_ckpt")
dest_dir = Path(dest_dir)
dest_dir.mkdir(exist_ok=True)
save_paths = []
if model_list is None:
model_list: list = make_registry(repo_path=repo_path)
for k, prepro, download, test_set_url in tqdm(model_list):
if "SentencePiece" not in prepro: # dont convert BPE models.
continue
if not os.path.exists(save_dir / k):
download_and_unzip(download, save_dir / k)
pair_name = convert_opus_name_to_hf_name(k)
convert(save_dir / k, dest_dir / f"opus-mt-{pair_name}")
save_paths.append(dest_dir / f"opus-mt-{pair_name}")
return save_paths
def lmap(f, x) -> list:
return list(map(f, x))
def fetch_test_set(test_set_url):
import wget
fname = wget.download(test_set_url, "opus_test.txt")
lns = Path(fname).open().readlines()
src = lmap(str.strip, lns[::4])
gold = lmap(str.strip, lns[1::4])
mar_model = lmap(str.strip, lns[2::4])
if not (len(gold) == len(mar_model) == len(src)):
raise ValueError(f"Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched")
os.remove(fname)
return src, mar_model, gold
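# The slicing above assumes the downloaded test file repeats blocks of four lines
# (source, reference, marian output, plus one extra line per block); only the first three are kept.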
def convert_whole_dir(path=Path("marian_ckpt/")):
    for subdir in tqdm(list(path.iterdir())):
        dest_dir = Path("marian_converted") / subdir.name
        if (dest_dir / "pytorch_model.bin").exists():
            continue
        convert(subdir, dest_dir)
def _parse_readme(lns):
"""Get link and metadata from opus model card equivalent."""
subres = {}
for ln in [x.strip() for x in lns]:
if not ln.startswith("*"):
continue
ln = ln[1:].strip()
for k in ["download", "dataset", "models", "model", "pre-processing"]:
if ln.startswith(k):
break
else:
continue
if k in ["dataset", "model", "pre-processing"]:
splat = ln.split(":")
_, v = splat
subres[k] = v
elif k == "download":
v = ln.split("(")[-1][:-1]
subres[k] = v
return subres
def save_tokenizer_config(dest_dir: Path, separate_vocabs=False):
dname = dest_dir.name.split("-")
dct = {"target_lang": dname[-1], "source_lang": "-".join(dname[:-1]), "separate_vocabs": separate_vocabs}
save_json(dct, dest_dir / "tokenizer_config.json")
def add_to_vocab_(vocab: dict[str, int], special_tokens: list[str]):
start = max(vocab.values()) + 1
added = 0
for tok in special_tokens:
if tok in vocab:
continue
vocab[tok] = start + added
added += 1
return added
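# For example, add_to_vocab_({"<unk>": 0, "▁the": 1}, ["<pad>"]) mutates the dict to include
# {"<pad>": 2} and returns 1; tokens that already exist are skipped and do not consume new ids.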
def find_vocab_file(model_dir):
return list(model_dir.glob("*vocab.yml"))[0]
def find_src_vocab_file(model_dir):
return list(model_dir.glob("*src.vocab.yml"))[0]
def find_tgt_vocab_file(model_dir):
return list(model_dir.glob("*trg.vocab.yml"))[0]
def add_special_tokens_to_vocab(model_dir: Path, separate_vocab=False) -> None:
if separate_vocab:
vocab = load_yaml(find_src_vocab_file(model_dir))
vocab = {k: int(v) for k, v in vocab.items()}
num_added = add_to_vocab_(vocab, ["<pad>"])
save_json(vocab, model_dir / "vocab.json")
vocab = load_yaml(find_tgt_vocab_file(model_dir))
vocab = {k: int(v) for k, v in vocab.items()}
num_added = add_to_vocab_(vocab, ["<pad>"])
save_json(vocab, model_dir / "target_vocab.json")
save_tokenizer_config(model_dir, separate_vocabs=separate_vocab)
else:
vocab = load_yaml(find_vocab_file(model_dir))
vocab = {k: int(v) for k, v in vocab.items()}
num_added = add_to_vocab_(vocab, ["<pad>"])
print(f"added {num_added} tokens to vocab")
save_json(vocab, model_dir / "vocab.json")
save_tokenizer_config(model_dir)
def check_equal(marian_cfg, k1, k2):
v1, v2 = marian_cfg[k1], marian_cfg[k2]
if v1 != v2:
raise ValueError(f"hparams {k1},{k2} differ: {v1} != {v2}")
def check_marian_cfg_assumptions(marian_cfg):
assumed_settings = {
"layer-normalization": False,
"right-left": False,
"transformer-ffn-depth": 2,
"transformer-aan-depth": 2,
"transformer-no-projection": False,
"transformer-postprocess-emb": "d",
"transformer-postprocess": "dan", # Dropout, add, normalize
"transformer-preprocess": "",
"type": "transformer",
"ulr-dim-emb": 0,
"dec-cell-base-depth": 2,
"dec-cell-high-depth": 1,
"transformer-aan-nogate": False,
}
for k, v in assumed_settings.items():
actual = marian_cfg[k]
if actual != v:
raise ValueError(f"Unexpected config value for {k} expected {v} got {actual}")
BIAS_KEY = "decoder_ff_logit_out_b"
BART_CONVERTER = { # for each encoder and decoder layer
"self_Wq": "self_attn.q_proj.weight",
"self_Wk": "self_attn.k_proj.weight",
"self_Wv": "self_attn.v_proj.weight",
"self_Wo": "self_attn.out_proj.weight",
"self_bq": "self_attn.q_proj.bias",
"self_bk": "self_attn.k_proj.bias",
"self_bv": "self_attn.v_proj.bias",
"self_bo": "self_attn.out_proj.bias",
"self_Wo_ln_scale": "self_attn_layer_norm.weight",
"self_Wo_ln_bias": "self_attn_layer_norm.bias",
"ffn_W1": "fc1.weight",
"ffn_b1": "fc1.bias",
"ffn_W2": "fc2.weight",
"ffn_b2": "fc2.bias",
"ffn_ffn_ln_scale": "final_layer_norm.weight",
"ffn_ffn_ln_bias": "final_layer_norm.bias",
# Decoder Cross Attention
"context_Wk": "encoder_attn.k_proj.weight",
"context_Wo": "encoder_attn.out_proj.weight",
"context_Wq": "encoder_attn.q_proj.weight",
"context_Wv": "encoder_attn.v_proj.weight",
"context_bk": "encoder_attn.k_proj.bias",
"context_bo": "encoder_attn.out_proj.bias",
"context_bq": "encoder_attn.q_proj.bias",
"context_bv": "encoder_attn.v_proj.bias",
"context_Wo_ln_scale": "encoder_attn_layer_norm.weight",
"context_Wo_ln_bias": "encoder_attn_layer_norm.bias",
}
class OpusState:
def __init__(self, source_dir, eos_token_id=0):
npz_path = find_model_file(source_dir)
self.state_dict = np.load(npz_path)
cfg = load_config_from_state_dict(self.state_dict)
if cfg["dim-vocabs"][0] != cfg["dim-vocabs"][1]:
            raise ValueError("Expected source and target vocab sizes (dim-vocabs) to be equal")
if "Wpos" in self.state_dict:
raise ValueError("Wpos key in state dictionary")
self.state_dict = dict(self.state_dict)
if cfg["tied-embeddings-all"]:
cfg["tied-embeddings-src"] = True
cfg["tied-embeddings"] = True
self.share_encoder_decoder_embeddings = cfg["tied-embeddings-src"]
# create the tokenizer here because we need to know the eos_token_id
self.source_dir = source_dir
self.tokenizer = self.load_tokenizer()
# retrieve EOS token and set correctly
tokenizer_has_eos_token_id = (
hasattr(self.tokenizer, "eos_token_id") and self.tokenizer.eos_token_id is not None
)
eos_token_id = self.tokenizer.eos_token_id if tokenizer_has_eos_token_id else 0
if cfg["tied-embeddings-src"]:
self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1)
self.pad_token_id = self.wemb.shape[0] - 1
cfg["vocab_size"] = self.pad_token_id + 1
else:
self.wemb, _ = add_emb_entries(self.state_dict["encoder_Wemb"], self.state_dict[BIAS_KEY], 1)
self.dec_wemb, self.final_bias = add_emb_entries(
self.state_dict["decoder_Wemb"], self.state_dict[BIAS_KEY], 1
)
# still assuming that vocab size is same for encoder and decoder
self.pad_token_id = self.wemb.shape[0] - 1
cfg["vocab_size"] = self.pad_token_id + 1
cfg["decoder_vocab_size"] = self.pad_token_id + 1
if cfg["vocab_size"] != self.tokenizer.vocab_size:
raise ValueError(
f"Original vocab size {cfg['vocab_size']} and new vocab size {len(self.tokenizer.encoder)} mismatched."
)
# self.state_dict['Wemb'].sha
self.state_keys = list(self.state_dict.keys())
if "Wtype" in self.state_dict:
raise ValueError("Wtype key in state dictionary")
self._check_layer_entries()
self.cfg = cfg
hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape
if hidden_size != cfg["dim-emb"]:
raise ValueError(f"Hidden size {hidden_size} and configured size {cfg['dim_emb']} mismatched")
# Process decoder.yml
decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml"))
check_marian_cfg_assumptions(cfg)
self.hf_config = MarianConfig(
vocab_size=cfg["vocab_size"],
decoder_vocab_size=cfg.get("decoder_vocab_size", cfg["vocab_size"]),
share_encoder_decoder_embeddings=cfg["tied-embeddings-src"],
decoder_layers=cfg["dec-depth"],
encoder_layers=cfg["enc-depth"],
decoder_attention_heads=cfg["transformer-heads"],
encoder_attention_heads=cfg["transformer-heads"],
decoder_ffn_dim=cfg["transformer-dim-ffn"],
encoder_ffn_dim=cfg["transformer-dim-ffn"],
d_model=cfg["dim-emb"],
activation_function=cfg["transformer-ffn-activation"],
pad_token_id=self.pad_token_id,
eos_token_id=eos_token_id,
forced_eos_token_id=eos_token_id,
bos_token_id=0,
max_position_embeddings=cfg["dim-emb"],
scale_embedding=True,
normalize_embedding="n" in cfg["transformer-preprocess"],
static_position_embeddings=not cfg["transformer-train-position-embeddings"],
tie_word_embeddings=cfg["tied-embeddings"],
dropout=0.1, # see opus-mt-train repo/transformer-dropout param.
# default: add_final_layer_norm=False,
num_beams=decoder_yml["beam-size"],
decoder_start_token_id=self.pad_token_id,
bad_words_ids=[[self.pad_token_id]],
max_length=512,
)
def _check_layer_entries(self):
self.encoder_l1 = self.sub_keys("encoder_l1")
self.decoder_l1 = self.sub_keys("decoder_l1")
self.decoder_l2 = self.sub_keys("decoder_l2")
if len(self.encoder_l1) != 16:
warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}")
if len(self.decoder_l1) != 26:
warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
if len(self.decoder_l2) != 26:
warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}")
@property
def extra_keys(self):
extra = []
for k in self.state_keys:
if (
k.startswith("encoder_l")
or k.startswith("decoder_l")
or k in [CONFIG_KEY, "Wemb", "encoder_Wemb", "decoder_Wemb", "Wpos", "decoder_ff_logit_out_b"]
):
continue
else:
extra.append(k)
return extra
def sub_keys(self, layer_prefix):
return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)]
def load_tokenizer(self):
# save tokenizer
add_special_tokens_to_vocab(self.source_dir, not self.share_encoder_decoder_embeddings)
return MarianTokenizer.from_pretrained(str(self.source_dir))
def load_marian_model(self) -> MarianMTModel:
state_dict, cfg = self.state_dict, self.hf_config
if not cfg.static_position_embeddings:
raise ValueError("config.static_position_embeddings should be True")
model = MarianMTModel(cfg)
if "hidden_size" in cfg.to_dict():
raise ValueError("hidden_size is in config")
load_layers_(
model.model.encoder.layers,
state_dict,
BART_CONVERTER,
)
load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True)
# handle tensors not associated with layers
if self.cfg["tied-embeddings-src"]:
wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
model.model.shared.weight = wemb_tensor
model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared
else:
wemb_tensor = nn.Parameter(torch.FloatTensor(self.wemb))
model.model.encoder.embed_tokens.weight = wemb_tensor
decoder_wemb_tensor = nn.Parameter(torch.FloatTensor(self.dec_wemb))
bias_tensor = nn.Parameter(torch.FloatTensor(self.final_bias))
model.model.decoder.embed_tokens.weight = decoder_wemb_tensor
# handle tied embeddings, otherwise "from_pretrained" loads them incorrectly
if self.cfg["tied-embeddings"]:
model.lm_head.weight.data = model.model.decoder.embed_tokens.weight.data.clone()
model.final_logits_bias = bias_tensor
if "Wpos" in state_dict:
print("Unexpected: got Wpos")
wpos_tensor = torch.tensor(state_dict["Wpos"])
model.model.encoder.embed_positions.weight = wpos_tensor
model.model.decoder.embed_positions.weight = wpos_tensor
if cfg.normalize_embedding:
if "encoder_emb_ln_scale_pre" not in state_dict:
raise ValueError("encoder_emb_ln_scale_pre is not in state dictionary")
raise NotImplementedError("Need to convert layernorm_embedding")
if self.extra_keys:
raise ValueError(f"Failed to convert {self.extra_keys}")
if model.get_input_embeddings().padding_idx != self.pad_token_id:
raise ValueError(
f"Padding tokens {model.get_input_embeddings().padding_idx} and {self.pad_token_id} mismatched"
)
return model
def download_and_unzip(url, dest_dir):
try:
import wget
except ImportError:
raise ImportError("you must pip install wget")
filename = wget.download(url)
unzip(filename, dest_dir)
os.remove(filename)
def convert(source_dir: Path, dest_dir):
dest_dir = Path(dest_dir)
dest_dir.mkdir(exist_ok=True)
opus_state = OpusState(source_dir)
# save tokenizer
opus_state.tokenizer.save_pretrained(dest_dir)
# save_json(opus_state.cfg, dest_dir / "marian_original_config.json")
# ^^ Uncomment to save human readable marian config for debugging
model = opus_state.load_marian_model()
model = model.half()
model.save_pretrained(dest_dir)
model.from_pretrained(dest_dir) # sanity check
def load_yaml(path):
import yaml
with open(path, encoding="utf-8") as f:
return yaml.load(f, Loader=yaml.BaseLoader)
def save_json(content: Union[dict, list], path: str) -> None:
with open(path, "w") as f:
json.dump(content, f)
def unzip(zip_path: str, dest_dir: str) -> None:
with ZipFile(zip_path, "r") as zipObj:
zipObj.extractall(dest_dir)
if __name__ == "__main__":
"""
Tatoeba conversion instructions in scripts/tatoeba/README.md
"""
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src",
type=str,
help="path to marian model sub dir. yaml.load will be used to load the configuration file, please be wary of which file you're loading.",
default="en-de",
)
parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.")
args = parser.parse_args()
source_dir = Path(args.src)
if not source_dir.exists():
raise ValueError(f"Source directory {source_dir} not found")
dest_dir = f"converted-{source_dir.name}" if args.dest is None else args.dest
convert(source_dir, dest_dir)
| transformers/src/transformers/models/marian/convert_marian_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/marian/convert_marian_to_pytorch.py",
"repo_id": "transformers",
"token_count": 12776
} | 485 |
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(k, None)
def make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
lin_layer.weight.data = emb.weight.data
return lin_layer
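# For an mBART embedding of shape (vocab_size, d_model), e.g. (250027, 1024), this returns a projection
# to the vocabulary whose weight is tied to (shares storage with) the embedding matrix.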
def convert_fairseq_mbart_checkpoint_from_disk(
checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"]
remove_ignore_keys_(state_dict)
vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
if mbart_50 and finetuned:
mbart_config.activation_function = "relu"
state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
model = MBartForConditionalGeneration(mbart_config)
model.model.load_state_dict(state_dict)
if finetuned:
model.lm_head = make_linear_from_emb(model.model.shared)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
args = parser.parse_args()
model = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
)
model.save_pretrained(args.pytorch_dump_folder_path)
| transformers/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 1123
} | 486 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/minimax/modular_minimax.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_minimax.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 MiniMaxAI and HuggingFace Inc. teams. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from transformers.utils.generic import check_model_inputs
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import (
GenericForQuestionAnswering,
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple
from ...utils.deprecation import deprecate_kwarg
from ...utils.generic import OutputRecorder
from .configuration_minimax import MiniMaxConfig
@use_kernel_forward_from_hub("RMSNorm")
class MiniMaxRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
MiniMaxRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class MiniMaxCache(DynamicCache):
def __init__(self):
super().__init__()
self.linear_cache: list[torch.Tensor] = []
def set_linear_cache(self, layer_idx, linear_cache):
# There may be skipped layers, fill them with empty lists
for _ in range(len(self.linear_cache), layer_idx + 1):
self.linear_cache.append([])
self.linear_cache[layer_idx] = linear_cache
def get_linear_cache(self, layer_idx: int):
if layer_idx < len(self):
return self.linear_cache[layer_idx]
return None
def __len__(self):
return max(super().__len__(), len(self.linear_cache))
def __getitem__(self, layer_idx: int):
if layer_idx < len(self.linear_cache) and self.linear_cache[layer_idx] != []:
return (self.linear_cache[layer_idx],)
return super().__getitem__(layer_idx)
def __iter__(self):
for layer_idx in range(len(self)):
yield self[layer_idx]
def batch_repeat_interleave(self, repeats: int):
for layer_idx in range(len(self)):
if self.linear_cache[layer_idx] != []:
self.linear_cache[layer_idx] = self.linear_cache[layer_idx].repeat_interleave(repeats, dim=0)
else:
self.layers[layer_idx].batch_repeat_interleave(repeats)
def batch_select_indices(self, indices: torch.Tensor):
for layer_idx in range(len(self)):
if self.linear_cache[layer_idx] != []:
self.linear_cache[layer_idx] = self.linear_cache[layer_idx][indices, ...]
else:
self.layers[layer_idx].batch_select_indices(indices)
def crop(self, max_length: int):
raise RuntimeError("MiniMaxCache doesnot support `crop` method")
class MiniMaxLightningAttention(nn.Module):
def __init__(self, config: MiniMaxConfig, layer_idx: int):
super().__init__()
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
self.num_attention_heads = config.num_attention_heads
self.num_hidden_layers = config.num_hidden_layers
self.block_size = config.block_size
self.act_fn = ACT2FN[config.hidden_act]
self.norm = MiniMaxRMSNorm(self.head_dim * self.num_attention_heads)
self.qkv_proj = nn.Linear(config.hidden_size, self.num_attention_heads * self.head_dim * 3, bias=False)
self.out_proj = nn.Linear(self.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.output_gate = nn.Linear(config.hidden_size, self.num_attention_heads * self.head_dim, bias=False)
slope_rate = self.get_slope_rate()
query_decay, key_decay, diagonal_decay = self.decay_factors(slope_rate)
self.register_buffer("slope_rate", slope_rate)
self.register_buffer("query_decay", query_decay)
self.register_buffer("key_decay", key_decay)
self.register_buffer("diagonal_decay", diagonal_decay)
def get_slope_rate(self):
base = 1 / (2 ** (8 / self.num_attention_heads))
exponent = torch.arange(self.num_attention_heads) + 1
factor = 1 - self.layer_idx / (self.num_hidden_layers - 1 + 1e-5) + 1e-5
rate = base**exponent
rate = rate * factor
rate = rate[:, None, None]
return rate
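    # For instance, with num_attention_heads = 8 the base is 1 / 2 ** (8 / 8) = 0.5, so the per-head
    # rates before the layer-dependent factor are 0.5, 0.25, ..., 0.5 ** 8 (one decay rate per head).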
def decay_factors(self, slope_rate):
block_size_range = torch.arange(self.block_size) + 1
query_decay = torch.exp(-slope_rate * block_size_range[:, None])
key_decay = torch.exp(-slope_rate * (self.block_size - block_size_range[:, None]))
diagonal_decay = block_size_range[:, None] - block_size_range[None, :]
diagonal_decay = diagonal_decay[None, None, :, :]
diagonal_decay = slope_rate * diagonal_decay
diagonal_decay = torch.where(diagonal_decay >= 0, -diagonal_decay, float("-inf"))
diagonal_decay = torch.exp(diagonal_decay)
return query_decay, key_decay, diagonal_decay
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
batch_size, seq_len, hidden_size = hidden_states.shape
num_blocks = (seq_len + self.block_size - 1) // self.block_size
qkv_states = self.act_fn(self.qkv_proj(hidden_states))
qkv_states = qkv_states.reshape(batch_size, seq_len, self.num_attention_heads, 3 * self.head_dim)
query_states, key_states, value_states = torch.split(qkv_states, self.head_dim, dim=3)
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
        # calculate (K.T @ V) and save it as cache
attn_weights_inter = None
if past_key_values is not None:
attn_weights_inter = past_key_values.get_linear_cache(self.layer_idx)
if attn_weights_inter is None:
attn_weights_inter = torch.zeros(batch_size, self.num_attention_heads, self.head_dim, self.head_dim).to(
value_states
)
# apply attention_mask
if attention_mask is not None:
attention_mask = attention_mask.to(dtype=torch.bool) # Ensure it's a boolean tensor
value_states = value_states.masked_fill(~attention_mask.unsqueeze(1).unsqueeze(-1), 0)
attn_output = []
for i in range(num_blocks):
start_idx = i * self.block_size
end_idx = min(start_idx + self.block_size, seq_len)
current_block_size = end_idx - start_idx
current_query_states = query_states[:, :, start_idx:end_idx]
current_key_states = key_states[:, :, start_idx:end_idx]
current_value_states = value_states[:, :, start_idx:end_idx]
current_query_decay = self.query_decay[:, :current_block_size]
current_key_decay = self.key_decay[:, -current_block_size:]
current_diagonal_decay = self.diagonal_decay[:, :, :current_block_size, :current_block_size]
block_decay = torch.exp(-self.slope_rate * current_block_size)
# intra: ( Q @ K.T ) @ V -> QK * V
attn_weights_intra = torch.matmul(current_query_states, current_key_states.transpose(-1, -2))
attn_output_intra = torch.matmul(attn_weights_intra * current_diagonal_decay, current_value_states)
# inter: Q @ ( K.T @ V ) -> Q * KV
attn_output_inter = torch.matmul(current_query_states * current_query_decay, attn_weights_inter)
# final attention output
current_attn_output = attn_output_inter + attn_output_intra
attn_output.append(current_attn_output)
                # calculate attn_weights_inter for next block or cache
next_attn_weights_inter = torch.matmul(
(current_key_states * current_key_decay).transpose(-1, -2), current_value_states
)
attn_weights_inter = attn_weights_inter * block_decay + next_attn_weights_inter
else:
ratio = torch.exp(-self.slope_rate)
attn_output = []
for i in range(seq_len):
current_query_states = query_states[:, :, i : i + 1]
current_key_states = key_states[:, :, i : i + 1]
current_value_states = value_states[:, :, i : i + 1]
current_attn_weights_inter = torch.matmul(current_key_states.transpose(-1, -2), current_value_states)
attn_weights_inter = ratio * attn_weights_inter + current_attn_weights_inter
current_attn_output = torch.matmul(current_query_states, attn_weights_inter)
attn_output.append(current_attn_output)
# concatenate attention outputs over all blocks
attn_output = torch.cat(attn_output, dim=-2)
# final output projection
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(batch_size, seq_len, self.num_attention_heads * self.head_dim)
attn_output = self.norm(attn_output)
attn_output = F.sigmoid(self.output_gate(hidden_states)) * attn_output
attn_output = self.out_proj(attn_output)
# update cache
if past_key_values is not None:
past_key_values.set_linear_cache(self.layer_idx, attn_weights_inter)
return attn_output, attn_weights_inter
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
class MiniMaxAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MiniMaxConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = self.head_dim**-0.5
self.attention_dropout = config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
sliding_window=getattr(self.config, "sliding_window", None), # main diff with Llama
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class MiniMaxBlockSparseTop2MLP(nn.Module):
def __init__(self, config: MiniMaxConfig):
super().__init__()
self.ffn_dim = config.intermediate_size
self.hidden_dim = config.hidden_size
self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
current_hidden_states = self.w2(current_hidden_states)
return current_hidden_states
class MiniMaxSparseMoeBlock(nn.Module):
"""
This implementation is
strictly equivalent to standard MoE with full capacity (no
dropped tokens). It's faster since it formulates MoE operations
in terms of block-sparse operations to accommodate imbalanced
assignments of tokens to experts, whereas standard MoE either
(1) drop tokens at the cost of reduced performance or (2) set
capacity factor to number of experts and thus waste computation
and memory on padding.
"""
def __init__(self, config):
super().__init__()
self.hidden_dim = config.hidden_size
self.ffn_dim = config.intermediate_size
self.num_experts = config.num_local_experts
self.top_k = config.num_experts_per_tok
# gating
self.gate = nn.Linear(self.hidden_dim, self.num_experts, bias=False)
self.experts = nn.ModuleList([MiniMaxBlockSparseTop2MLP(config) for _ in range(self.num_experts)])
# Jitter parameters
self.jitter_noise = config.router_jitter_noise
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
""" """
batch_size, sequence_length, hidden_dim = hidden_states.shape
if self.training and self.jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
hidden_states = hidden_states.view(-1, hidden_dim)
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
# we cast back to the input dtype
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros(
(batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
)
# One hot encode the selected experts to create an expert mask
        # this will be used to easily index which expert is going to be solicited
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_layer = self.experts[expert_idx]
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
# Index the correct hidden states and compute the expert hidden state for
# the current expert. We need to make sure to multiply the output hidden
# states by `routing_weights` on the corresponding tokens (top-1 and top-2)
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
            # However `index_add_` only supports torch tensors for indexing so we'll use
# the `top_x` tensor here.
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return final_hidden_states, router_logits
class MiniMaxDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: MiniMaxConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = MiniMaxAttention(config, layer_idx)
self.block_sparse_moe = MiniMaxSparseMoeBlock(config)
self.input_layernorm = MiniMaxRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = MiniMaxRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
self.layer_type = config.layer_types[layer_idx]
self.mlp_alpha_factor = config.mlp_alpha_factor
self.mlp_beta_factor = config.mlp_beta_factor
if self.layer_type == "linear_attention":
self.self_attn = MiniMaxLightningAttention(config, layer_idx)
self.attn_alpha_factor = config.linear_attn_alpha_factor
self.attn_beta_factor = config.linear_attn_beta_factor
else:
self.self_attn = MiniMaxAttention(config, layer_idx)
self.attn_alpha_factor = config.full_attn_alpha_factor
self.attn_beta_factor = config.full_attn_beta_factor
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
output_router_logits: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
attention_mask (`torch.Tensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_router_logits (`bool`, *optional*):
Whether or not to return the logits of all the routers. They are useful for computing the router loss, and
should not be returned during inference.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
kwargs (`dict`, *optional*):
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
into the model
"""
hidden_states = self.input_layernorm(hidden_states)
residual = hidden_states
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = residual * self.attn_alpha_factor + hidden_states * self.attn_beta_factor
# Fully Connected
hidden_states = self.post_attention_layernorm(hidden_states)
residual = hidden_states
hidden_states, _ = self.block_sparse_moe(hidden_states)
hidden_states = residual * self.mlp_alpha_factor + hidden_states * self.mlp_beta_factor
return hidden_states
@auto_docstring
class MiniMaxPreTrainedModel(PreTrainedModel):
config: MiniMaxConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["MiniMaxDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = False
_supports_attention_backend = True
_can_record_outputs = {
"router_logits": OutputRecorder(MiniMaxSparseMoeBlock, index=1),
"hidden_states": MiniMaxDecoderLayer,
"attentions": [MiniMaxAttention, MiniMaxLightningAttention],
}
class MiniMaxRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: MiniMaxConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
@auto_docstring
class MiniMaxModel(MiniMaxPreTrainedModel):
def __init__(self, config: MiniMaxConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[MiniMaxDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = MiniMaxRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = MiniMaxRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[MiniMaxCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if use_cache and past_key_values is None:
past_key_values = MiniMaxCache()
elif use_cache and not isinstance(past_key_values, MiniMaxCache):
raise ValueError(
f"MiniMax uses cache of its own and is not compatible with `past_key_values` of type {type(past_key_values)}."
)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
causal_mask = mask_function(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
for decoder_layer in self.layers:
if decoder_layer.layer_type == "full_attention":
input_attention_mask = causal_mask
else:
# lightning attention uses original attention_mask, and uses it only for the first step
input_attention_mask = attention_mask
hidden_states = decoder_layer(
hidden_states,
position_embeddings=position_embeddings,
attention_mask=input_attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
        # Compute the percentage of tokens routed to each expert
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
        # Compute the percentage of tokens routed to each expert
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, num_experts))
.reshape(-1, num_experts)
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
return overall_loss * num_experts
@auto_docstring
class MiniMaxForCausalLM(MiniMaxPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = MiniMaxModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_local_experts
self.num_experts_per_tok = config.num_experts_per_tok
# Initialize weights and apply final processing
self.post_init()
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, MiniMaxForCausalLM
>>> model = MiniMaxForCausalLM.from_pretrained("MiniMaxAI/MiniMax-Text-01-hf")
>>> tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-Text-01-hf")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_router_logits=output_router_logits,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
class MiniMaxForSequenceClassification(GenericForSequenceClassification, MiniMaxPreTrainedModel):
pass
class MiniMaxForTokenClassification(GenericForTokenClassification, MiniMaxPreTrainedModel):
pass
class MiniMaxForQuestionAnswering(GenericForQuestionAnswering, MiniMaxPreTrainedModel):
pass
__all__ = [
"MiniMaxPreTrainedModel",
"MiniMaxModel",
"MiniMaxForCausalLM",
"MiniMaxForSequenceClassification",
"MiniMaxForTokenClassification",
"MiniMaxForQuestionAnswering",
]
| transformers/src/transformers/models/minimax/modeling_minimax.py/0 | {
"file_path": "transformers/src/transformers/models/minimax/modeling_minimax.py",
"repo_id": "transformers",
"token_count": 18082
} | 487 |
# Copyright 2023 Mistral AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import torch
from transformers import (
MixtralConfig,
MixtralForCausalLM,
)
"""
Sample usage:
```
python src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py \
--input_dir /path/to/downloaded/mixtral/weights --model_size 7B --output_dir /output/path
```
Thereafter, models can be loaded via:
```py
from transformers import MixtralForCausalLM
model = MixtralForCausalLM.from_pretrained("/output/path")
```
Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM).
"""
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
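# Worked example: compute_intermediate_size(4096) -> 256 * ((int(1 * int(8 * 4096 / 3)) + 255) // 256) = 11008,
# the familiar Llama-style FFN width for a 4096-dim model.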
def read_json(path):
with open(path, "r") as f:
return json.load(f)
def write_json(text, path):
with open(path, "w") as f:
json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
os.makedirs(model_path, exist_ok=True)
params = read_json(os.path.join(input_base_path, "params.json"))
num_shards = 1
# For some reason this is a string in the params.json
sliding_window = int(params["sliding_window"]) if "sliding_window" in params else None
n_layers = params["num_hidden_layers"]
n_heads = params["num_attention_heads"]
n_heads_per_shard = n_heads // num_shards
dim = params["hidden_size"]
dims_per_head = dim // n_heads
base = params.get("rope_theta", 10000.0)
max_position_embeddings = 4096 * 8
num_local_experts = params["num_local_experts"]
ffn_dim = params["intermediate_size"]
vocab_size = params["vocab_size"]
if "num_key_value_heads" in params:
num_key_value_heads = params["num_key_value_heads"] # for GQA / MQA
num_local_key_value_heads = num_key_value_heads // num_shards
key_value_dim = dims_per_head * num_local_key_value_heads
else: # compatibility with other checkpoints
num_key_value_heads = n_heads
num_local_key_value_heads = n_heads_per_shard
key_value_dim = dim
# permute for sliced rotary
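# The original checkpoint stores the rotary q/k projections with interleaved (pairwise) dimensions; this reshuffles
# them into the half-rotation layout expected by the Transformers RoPE implementation, without changing any values.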
def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
# Load weights
loaded = [
torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pt"), map_location="cpu", weights_only=True)
for i in range(8)
]
merged_state_dict = {}
for state_dict in loaded:
merged_state_dict.update(state_dict)
state_dict = {}
for layer_i in range(n_layers):
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, so saving attention_norm and ffn_norm would also save the other weights, which is
# redundant since those other weights will be stitched from multiple shards. To avoid that, they are cloned.
state_dict.update(
{
f"model.layers.{layer_i}.input_layernorm.weight": merged_state_dict[
f"layers.{layer_i}.attention_norm.weight"
].clone(),
f"model.layers.{layer_i}.post_attention_layernorm.weight": merged_state_dict[
f"layers.{layer_i}.ffn_norm.weight"
].clone(),
}
)
state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
merged_state_dict[f"layers.{layer_i}.attention.wq.weight"]
.view(n_heads_per_shard, dims_per_head, dim)
.reshape(dim, dim)
)
state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
merged_state_dict[f"layers.{layer_i}.attention.wk.weight"]
.view(num_local_key_value_heads, dims_per_head, dim)
.reshape(key_value_dim, dim),
num_key_value_heads,
key_value_dim,
dim,
)
state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = (
merged_state_dict[f"layers.{layer_i}.attention.wv.weight"]
.view(num_local_key_value_heads, dims_per_head, dim)
.reshape(key_value_dim, dim)
)
state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = merged_state_dict[
f"layers.{layer_i}.attention.wo.weight"
]
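# The MoE expert weights are stored as one large matrix per projection (w1/w2/w3), with the experts stacked along
# the first dimension in chunks of `ffn_dim` rows; they are sliced back out per expert below.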
w1 = merged_state_dict[f"layers.{layer_i}.block_sparse_moe.w1"]
w2 = merged_state_dict[f"layers.{layer_i}.block_sparse_moe.w2"]
w3 = merged_state_dict[f"layers.{layer_i}.block_sparse_moe.w3"]
experts_w1 = [
w1[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].clone(memory_format=torch.contiguous_format)
for expert_idx in range(num_local_experts)
]
for idx, expert_block in enumerate(experts_w1):
expert_key = f"model.layers.{layer_i}.block_sparse_moe.experts.{idx}.w1"
state_dict[expert_key + ".weight"] = expert_block.clone()
experts_w2 = [
w2[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].clone(memory_format=torch.contiguous_format)
for expert_idx in range(num_local_experts)
]
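# w2 (the down-projection) is stored transposed in the original checkpoint, hence the `.T` when copying it below.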
for idx, expert_block in enumerate(experts_w2):
expert_key = f"model.layers.{layer_i}.block_sparse_moe.experts.{idx}.w2"
state_dict[expert_key + ".weight"] = expert_block.T.clone(memory_format=torch.contiguous_format)
experts_w3 = [
w3[ffn_dim * expert_idx : ffn_dim * (expert_idx + 1), :].clone(memory_format=torch.contiguous_format)
for expert_idx in range(num_local_experts)
]
for idx, expert_block in enumerate(experts_w3):
expert_key = f"model.layers.{layer_i}.block_sparse_moe.experts.{idx}.w3"
state_dict[expert_key + ".weight"] = expert_block.clone()
state_dict[f"model.layers.{layer_i}.block_sparse_moe.gate.weight"] = merged_state_dict[
f"layers.{layer_i}.block_sparse_moe.gate.weight"
]
state_dict.update(
{
"model.norm.weight": merged_state_dict["norm.weight"],
"model.embed_tokens.weight": merged_state_dict["tok_embeddings.weight"],
"lm_head.weight": merged_state_dict["output.weight"],
}
)
config = MixtralConfig(
hidden_size=dim,
intermediate_size=ffn_dim,
num_attention_heads=params["num_attention_heads"],
num_hidden_layers=params["num_hidden_layers"],
rms_norm_eps=params["rms_norm_eps"],
num_key_value_heads=num_key_value_heads,
vocab_size=vocab_size,
rope_theta=base,
max_position_embeddings=max_position_embeddings,
sliding_window=sliding_window,
num_local_experts=num_local_experts,
)
print("Loading the checkpoint in a Mixtral model.")
with torch.device("meta"):
model = MixtralForCausalLM(config)
# Avoid saving this as part of the config.
del model.config._name_or_path
model.config.dtype = torch.float16
print("Saving in the Transformers format.")
model.load_state_dict(state_dict, strict=True, assign=True)
for n, p in model.named_parameters():
assert p.device.type != "meta", f"{n} has not been loaded!"
model.save_pretrained(model_path, safe_serialization=safe_serialization)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
help="Location of Mixtral weights, which contains tokenizer.model and model folders",
required=True,
)
parser.add_argument(
"--model_size",
choices=["7B"],
help="'f' models correspond to the finetuned versions, and are specific to the Mixtral official release. For more details on Mixtral, check out the original repo: https://huggingface.co/mistral-ai",
default="7B",
)
parser.add_argument("--output_dir", help="Location to write HF model", required=True)
parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
args = parser.parse_args()
write_model(
model_path=args.output_dir,
input_base_path=args.input_dir,
model_size=args.model_size,
safe_serialization=args.safe_serialization,
)
if __name__ == "__main__":
main()
| transformers/src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 4000
} | 488 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for MobileViT."""
from typing import Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_processing_utils_fast import (
BaseImageProcessorFast,
DefaultFastImageProcessorKwargs,
group_images_by_shape,
reorder_images,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
SizeDict,
is_torch_tensor,
)
from ...processing_utils import Unpack
from ...utils import (
TensorType,
auto_docstring,
is_torch_available,
is_torchvision_available,
is_torchvision_v2_available,
)
if is_torch_available():
import torch
if is_torchvision_available():
if is_torchvision_v2_available():
from torchvision.transforms.v2 import functional as F
else:
from torchvision.transforms import functional as F
class MobileVitFastImageProcessorKwargs(DefaultFastImageProcessorKwargs):
"""
do_flip_channel_order (`bool`, *optional*, defaults to `self.do_flip_channel_order`):
Whether to flip the color channels from RGB to BGR or vice versa.
do_reduce_labels (`bool`, *optional*, defaults to `self.do_reduce_labels`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0
is used for background, and background itself is not included in all classes of a dataset (e.g.
ADE20k). The background label will be replaced by 255.
"""
do_flip_channel_order: Optional[bool]
do_reduce_labels: Optional[bool]
@auto_docstring
class MobileViTImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 256, "width": 256}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = None
do_convert_rgb = None
do_flip_channel_order = True
do_reduce_labels = False
valid_kwargs = MobileVitFastImageProcessorKwargs
def __init__(self, **kwargs: Unpack[MobileVitFastImageProcessorKwargs]):
super().__init__(**kwargs)
# Copied from transformers.models.beit.image_processing_beit_fast.BeitImageProcessorFast.reduce_label
def reduce_label(self, labels: list["torch.Tensor"]):
for idx in range(len(labels)):
label = labels[idx]
label = torch.where(label == 0, torch.tensor(255, dtype=label.dtype), label)
label = label - 1
label = torch.where(label == 254, torch.tensor(255, dtype=label.dtype), label)
labels[idx] = label
return labels
@auto_docstring
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
**kwargs: Unpack[MobileVitFastImageProcessorKwargs],
) -> BatchFeature:
r"""
segmentation_maps (`ImageInput`, *optional*):
The segmentation maps to preprocess.
"""
return super().preprocess(images, segmentation_maps, **kwargs)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput],
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[MobileVitFastImageProcessorKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
"""
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
images_kwargs = kwargs.copy()
images_kwargs["do_reduce_labels"] = False
batch_feature = self._preprocess(images, **images_kwargs)
if segmentation_maps is not None:
processed_segmentation_maps = self._prepare_image_like_inputs(
images=segmentation_maps,
expected_ndims=2,
do_convert_rgb=False,
input_data_format=ChannelDimension.FIRST,
)
segmentation_maps_kwargs = kwargs.copy()
segmentation_maps_kwargs.update(
{
"do_rescale": False,
"do_flip_channel_order": False,
# Nearest interpolation is used for segmentation maps instead of BILINEAR.
"interpolation": F.InterpolationMode.NEAREST_EXACT
if is_torchvision_v2_available()
else F.InterpolationMode.NEAREST,
}
)
processed_segmentation_maps = self._preprocess(
images=processed_segmentation_maps, **segmentation_maps_kwargs
).pixel_values
batch_feature["labels"] = processed_segmentation_maps.squeeze(1).to(torch.int64)
return batch_feature
def _preprocess(
self,
images: list["torch.Tensor"],
do_reduce_labels: bool,
do_resize: bool,
size: Optional[SizeDict],
interpolation: Optional["F.InterpolationMode"],
do_rescale: bool,
rescale_factor: Optional[float],
do_center_crop: bool,
crop_size: Optional[SizeDict],
do_flip_channel_order: bool,
disable_grouping: bool,
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
processed_images = []
if do_reduce_labels:
images = self.reduce_label(images)
# Group images by shape for more efficient batch processing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
# Process each group of images with the same shape
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
# Reorder images to original sequence
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group again after resizing (in case resize produced different sizes)
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(image=stacked_images, size=crop_size)
if do_rescale:
stacked_images = self.rescale(image=stacked_images, scale=rescale_factor)
if do_flip_channel_order:
# For batched images, we need to handle them all at once
if stacked_images.ndim > 3 and stacked_images.shape[1] >= 3:
# Flip RGB → BGR for batched images
flipped = stacked_images.clone()
flipped[:, 0:3] = stacked_images[:, [2, 1, 0], ...]
stacked_images = flipped
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
# Stack all processed images if return_tensors is specified
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def post_process_semantic_segmentation(self, outputs, target_sizes: Optional[list[tuple]] = None):
"""
Converts the output of [`MobileViTForSemanticSegmentation`] into semantic segmentation maps. Only supports PyTorch.
Args:
outputs ([`MobileViTForSemanticSegmentation`]):
Raw outputs of the model.
target_sizes (`list[Tuple]` of length `batch_size`, *optional*):
List of tuples corresponding to the requested final size (height, width) of each prediction. If unset,
predictions will not be resized.
Returns:
semantic_segmentation: `list[torch.Tensor]` of length `batch_size`, where each item is a semantic
segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is
specified). Each entry of each `torch.Tensor` correspond to a semantic class id.
"""
logits = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(logits) != len(target_sizes):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits"
)
if is_torch_tensor(target_sizes):
target_sizes = target_sizes.numpy()
semantic_segmentation = []
for idx in range(len(logits)):
resized_logits = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
)
semantic_map = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(semantic_map)
else:
semantic_segmentation = logits.argmax(dim=1)
semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
__all__ = ["MobileViTImageProcessorFast"]
| transformers/src/transformers/models/mobilevit/image_processing_mobilevit_fast.py/0 | {
"file_path": "transformers/src/transformers/models/mobilevit/image_processing_mobilevit_fast.py",
"repo_id": "transformers",
"token_count": 4295
} | 489 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/moonshine/modular_moonshine.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_moonshine.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...configuration_utils import PretrainedConfig
from ...modeling_rope_utils import rope_config_validation
class MoonshineConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`MoonshineModel`]. It is used to instantiate a Moonshine
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the Moonshine
[UsefulSensors/moonshine-tiny](https://huggingface.co/UsefulSensors/moonshine-tiny).
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 32768):
Vocabulary size of the Moonshine model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`MoonshineModel`].
hidden_size (`int`, *optional*, defaults to 288):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 1152):
Dimension of the MLP representations.
encoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer encoder.
decoder_num_hidden_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
encoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer decoder.
encoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`encoder_num_key_value_heads=encoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`encoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`num_attention_heads`.
decoder_num_key_value_heads (`int`, *optional*):
This is the number of key_value heads that should be used to implement Grouped Query Attention. If
`decoder_num_key_value_heads=decoder_num_attention_heads`, the model will use Multi Head Attention (MHA), if
`decoder_num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
by meanpooling all the original heads within that group. For more details, check out [this
paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to
`decoder_num_attention_heads`.
pad_head_dim_to_multiple_of (`int`, *optional*):
Pad head dimension in encoder and decoder to the next multiple of this value. Necessary for using certain
optimized attention implementations.
encoder_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder.
decoder_hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
max_position_embeddings (`int`, *optional*, defaults to 512):
The maximum sequence length that this model might ever be used with.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
decoder_start_token_id (`int`, *optional*, defaults to 1):
Corresponds to the "<|startoftranscript|>" token, which is automatically used when no `decoder_input_ids`
are provided to the `generate` function. It is used to guide the model's generation process depending on
the task.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
rope_theta (`float`, *optional*, defaults to 10000.0):
The base period of the RoPE embeddings.
rope_scaling (`Dict`, *optional*):
Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
accordingly.
Expected contents:
`rope_type` (`str`):
The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
'llama3'], with 'default' being the original RoPE implementation.
`factor` (`float`, *optional*):
Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
most scaling types, a `factor` of x will enable the model to handle sequences of length x *
original maximum pre-trained length.
`original_max_position_embeddings` (`int`, *optional*):
Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
pretraining.
`attention_factor` (`float`, *optional*):
Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
computation. If unspecified, it defaults to value recommended by the implementation, using the
`factor` field to infer the suggested value.
`beta_fast` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
ramp function. If unspecified, it defaults to 32.
`beta_slow` (`float`, *optional*):
Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
ramp function. If unspecified, it defaults to 1.
`short_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to short contexts (<
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`long_factor` (`list[float]`, *optional*):
Only used with 'longrope'. The scaling factor to be applied to long contexts (>
`original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
size divided by the number of attention heads divided by 2
`low_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE
`high_freq_factor` (`float`, *optional*):
Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE
partial_rotary_factor (`float`, *optional*, defaults to 0.9):
Percentage of the query and keys which will have rotary embedding.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether the model is used as an encoder/decoder or not.
attention_bias (`bool`, *optional*, defaults to `False`):
Whether to use a bias in the query, key, value and output projection layers during self-attention.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
bos_token_id (`int`, *optional*, defaults to 1):
Denotes beginning of sequences token id.
eos_token_id (`int`, *optional*, defaults to 2):
Denotes end of sequences token id.
Example:
```python
>>> from transformers import MoonshineModel, MoonshineConfig
>>> # Initializing a Moonshine style configuration
>>> configuration = MoonshineConfig().from_pretrained("UsefulSensors/moonshine-tiny")
>>> # Initializing a model from the configuration
>>> model = MoonshineModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "moonshine"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"num_key_value_heads": "encoder_num_key_value_heads",
"num_attention_heads": "encoder_num_attention_heads",
"num_hidden_layers": "encoder_num_hidden_layers",
}
def __init__(
self,
vocab_size=32768,
hidden_size=288,
intermediate_size=1152,
encoder_num_hidden_layers=6,
decoder_num_hidden_layers=6,
encoder_num_attention_heads=8,
decoder_num_attention_heads=8,
encoder_num_key_value_heads=None,
decoder_num_key_value_heads=None,
pad_head_dim_to_multiple_of=None,
encoder_hidden_act="gelu",
decoder_hidden_act="silu",
max_position_embeddings=512,
initializer_range=0.02,
decoder_start_token_id=1,
use_cache=True,
rope_theta=10000.0,
rope_scaling=None,
partial_rotary_factor=0.9,
is_encoder_decoder=True,
attention_bias=False,
attention_dropout=0.0,
bos_token_id=1,
eos_token_id=2,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.encoder_num_hidden_layers = encoder_num_hidden_layers
self.decoder_num_hidden_layers = decoder_num_hidden_layers
self.encoder_num_attention_heads = encoder_num_attention_heads
self.decoder_num_attention_heads = decoder_num_attention_heads
if encoder_num_key_value_heads is None:
encoder_num_key_value_heads = encoder_num_attention_heads
self.encoder_num_key_value_heads = encoder_num_key_value_heads
if decoder_num_key_value_heads is None:
decoder_num_key_value_heads = decoder_num_attention_heads
self.decoder_num_key_value_heads = decoder_num_key_value_heads
self.pad_head_dim_to_multiple_of = pad_head_dim_to_multiple_of
self.encoder_hidden_act = encoder_hidden_act
self.decoder_hidden_act = decoder_hidden_act
self.max_position_embeddings = max_position_embeddings
self.initializer_range = initializer_range
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.rope_theta = rope_theta
self.rope_scaling = rope_scaling
self.partial_rotary_factor = partial_rotary_factor
self.is_encoder_decoder = is_encoder_decoder
self.attention_bias = attention_bias
self.attention_dropout = attention_dropout
# Validate the correctness of rotary position embeddings parameters
rope_config_validation(self)
super().__init__(
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
__all__ = ["MoonshineConfig"]
| transformers/src/transformers/models/moonshine/configuration_moonshine.py/0 | {
"file_path": "transformers/src/transformers/models/moonshine/configuration_moonshine.py",
"repo_id": "transformers",
"token_count": 5437
} | 490 |
# coding=utf-8
# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MPT model."""
import math
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
from torch.nn import functional as F
from ...cache_utils import Cache, DynamicCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
QuestionAnsweringModelOutput,
SequenceClassifierOutputWithPast,
TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_mpt import MptConfig
logger = logging.get_logger(__name__)
def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=None):
r"""
Link to paper: https://huggingface.co/papers/2108.12409 - the ALiBi tensor here is not causal as the original paper
mentions; it relies on the translation invariance of softmax for a quick implementation. This implementation was copied
from the ALiBi implementation in the MPT source code, which leads to slightly different results than the Bloom ALiBi:
https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292
"""
alibi = torch.arange(1 - sequence_length, 1, dtype=torch.int32, device=device).view(1, 1, 1, sequence_length)
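# ALiBi slopes form a geometric series per head; when num_heads is not a power of two, the slopes are first computed
# for the next power of two and then interleaved and truncated so that every head still receives a distinct slope.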
num_heads_power_of_2 = 2 ** math.ceil(math.log2(num_heads))
base = torch.arange(1, num_heads_power_of_2 + 1, dtype=torch.int64, device=device).float()
base = base * (alibi_bias_max / num_heads_power_of_2)
slopes = 1.0 / torch.pow(2, base)
slopes = slopes.view(1, num_heads_power_of_2, 1, 1)
if num_heads_power_of_2 != num_heads:
slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]
alibi = alibi * slopes
return alibi.squeeze(0)
class MptAttention(nn.Module):
"""Multi-head self attention.
Using the torch or triton attention implementation also enables the user to use an additive bias.
"""
def __init__(self, config: MptConfig, layer_idx: Optional[int] = None):
super().__init__()
self.hidden_size = config.hidden_size
self.n_heads = config.n_heads
self.max_seq_length = config.max_seq_len
self.head_dim = self.hidden_size // self.n_heads
self.softmax_scale = config.attn_config.softmax_scale
if self.softmax_scale is None:
self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads)
self.attn_dropout_p = config.attn_config.attn_pdrop
self.clip_qkv = config.attn_config.clip_qkv
self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
self.layer_idx = layer_idx
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_bias: torch.Tensor,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
cache_position: Optional[torch.Tensor] = None,
):
batch_size, seq_length = hidden_states.shape[:2]
mixed_qkv = self.Wqkv(hidden_states)
if self.clip_qkv:
mixed_qkv = mixed_qkv.clamp(min=-self.clip_qkv, max=self.clip_qkv)
query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2)
query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
if past_key_values is not None:
cache_kwargs = {"cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale
query_length = seq_length if past_key_values is None else seq_length + past_key_values.get_seq_length()
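# The ALiBi position bias is precomputed for the full max_seq_len; slice it from the end so that it lines up with the
# current query/key lengths (which can be shorter, e.g. during incremental decoding).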
if position_bias is not None:
if len(position_bias.shape) != 3:
raise ValueError(f"Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}")
key_length = key_states.shape[-2]
position_bias_query_index = max(0, position_bias.size(1) - query_length)
position_bias_key_index = max(0, position_bias.size(2) - key_length)
position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:]
attention_scores = attention_scores + position_bias
if attention_mask is not None:
attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min)
# (batch_size, n_heads, seq_length, key_length)
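# upcast to float32 for a numerically stable softmax, then cast the probabilities back to the value dtype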
attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training)
context_states = torch.matmul(attn_weights, value_states)
context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
attn_output = self.out_proj(context_states)
return attn_output, attn_weights
class MptMLP(nn.Module):
def __init__(self, config: MptConfig):
super().__init__()
hidden_size = config.hidden_size
self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False)
self.act = nn.GELU(approximate="none")
self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False)
self.hidden_dropout = config.attn_config.attn_pdrop
def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
hidden_states = self.act(self.up_proj(hidden_states))
intermediate_output = self.down_proj(hidden_states)
output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training)
output = output + residual
return output
class MptBlock(GradientCheckpointingLayer):
def __init__(self, config: MptConfig, layer_idx: Optional[int] = None):
super().__init__()
hidden_size = config.hidden_size
self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
# backward compatibility with weights on the Hub
self.norm_1.bias = None
self.num_heads = config.n_heads
self.attn = MptAttention(config, layer_idx)
self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
# backward compatibility with weights on the Hub
self.norm_2.bias = None
self.ffn = MptMLP(config)
self.dropout_rate = config.attn_config.attn_pdrop
self.resid_attn_dropout = nn.Dropout(self.dropout_rate)
def forward(
self,
hidden_states: torch.Tensor,
position_bias: torch.Tensor,
attention_mask: torch.Tensor,
layer_past: Optional[Cache] = None,
use_cache: bool = False,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
):
# hidden_states: [batch_size, seq_length, hidden_size]
# Layer norm at the beginning of the transformer layer.
layernorm_output = self.norm_1(hidden_states)
residual = hidden_states
# Self attention.
attn_outputs, attn_weights = self.attn(
layernorm_output,
position_bias=position_bias,
attention_mask=attention_mask,
past_key_values=layer_past,
cache_position=cache_position,
)
hidden_states = self.resid_attn_dropout(attn_outputs) + residual
layernorm_output = self.norm_2(hidden_states)
# Get residual
residual = hidden_states
# MLP.
output = self.ffn(layernorm_output, residual)
return output, attn_weights
@auto_docstring
class MptPreTrainedModel(PreTrainedModel):
config: MptConfig
base_model_prefix = "transformer"
supports_gradient_checkpointing = True
_no_split_modules = ["MptBlock"]
_keys_to_ignore_on_load_missing = [r"lm_head.*."]
def __init__(self, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayerNorm):
if module.bias is not None:
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@staticmethod
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def _convert_to_mpt_cache(
past_key_values: tuple[tuple[torch.Tensor, torch.Tensor]],
) -> tuple[tuple[torch.Tensor, torch.Tensor]]:
"""
Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...]))
"""
batch_size, num_heads, head_dim, seq_length = past_key_values[0][0].shape
batch_size_times_num_heads = batch_size * num_heads
# key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
# value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
return tuple(
(
layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length),
layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim),
)
for layer_past in past_key_values
)
@auto_docstring
class MptModel(MptPreTrainedModel):
def __init__(self, config: MptConfig):
super().__init__(config)
self.hidden_size = config.hidden_size
self.num_heads = config.n_heads
# Embedding + LN Embedding
self.wte = nn.Embedding(config.vocab_size, self.hidden_size)
# Transformer blocks
self.blocks = nn.ModuleList([MptBlock(config, layer_idx=i) for i in range(config.n_layers)])
# Final Layer Norm
self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon)
# backward compatibility with weights on the Hub
self.norm_f.bias = None
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.wte
def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None):
return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device)
def set_input_embeddings(self, new_embeddings: torch.Tensor):
self.wte = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[tuple[tuple[torch.Tensor, torch.Tensor], ...], Cache]] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs, # NOOP kwargs, for now
) -> Union[tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
batch_size, seq_length = input_ids.shape
elif inputs_embeds is not None:
batch_size, seq_length, _ = inputs_embeds.shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache()
if use_cache and isinstance(past_key_values, tuple):
logger.warning_once(
"Passing a tuple of `past_key_values` is deprecated and will be removed in Transformers v4.58.0. "
"You should pass an instance of `DynamicCache` instead, e.g. "
"`past_key_values=DynamicCache.from_legacy_cache(past_key_values)`."
)
past_key_values = DynamicCache.from_legacy_cache(past_key_values)
hidden_states = inputs_embeds
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
# Compute alibi tensor: check build_alibi_tensor documentation
past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
seq_length_with_past = seq_length + past_key_values_length
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
else:
attention_mask = attention_mask.to(hidden_states.device)
alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device)
causal_mask = _prepare_4d_causal_attention_mask(
attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
)
causal_mask = causal_mask.bool()
for block in self.blocks:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
layer_past=past_key_values,
attention_mask=causal_mask,
use_cache=use_cache,
output_attentions=output_attentions,
position_bias=alibi,
cache_position=cache_position,
)
hidden_states = outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[1],)
# Add last hidden state
hidden_states = self.norm_f(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions] if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring(
custom_intro="""
The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
class MptForCausalLM(MptPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config: MptConfig):
super().__init__(config)
self.transformer = MptModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def set_output_embeddings(self, new_embeddings: torch.Tensor):
self.lm_head = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor, torch.Tensor], ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs,
) -> Union[tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# move labels to correct device to enable model parallelism
labels = labels.to(lm_logits.device)
# Flatten the tokens
loss = self.loss_function(
lm_logits,
labels,
vocab_size=self.config.vocab_size,
**kwargs,
)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring(
custom_intro="""
The MPT Model transformer with a sequence classification head on top (linear layer).
[`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-1) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
class MptForSequenceClassification(MptPreTrainedModel):
def __init__(self, config: MptConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = MptModel(config)
self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor, torch.Tensor], ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
class MptForTokenClassification(MptPreTrainedModel):
def __init__(self, config: MptConfig):
super().__init__(config)
self.num_labels = config.num_labels
self.transformer = MptModel(config)
if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
classifier_dropout = config.classifier_dropout
elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
classifier_dropout = config.hidden_dropout
else:
classifier_dropout = 0.1
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[tuple[torch.Tensor, torch.Tensor], ...]] = None,
attention_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**deprecated_arguments,
) -> Union[tuple[torch.Tensor], TokenClassifierOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
hidden_states = self.dropout(hidden_states)
logits = self.classifier(hidden_states)
loss = None
if labels is not None:
# move labels to correct device to enable model parallelism
labels = labels.to(logits.device)
batch_size, seq_length = labels.shape
loss_fct = CrossEntropyLoss()
loss = loss_fct(
logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
)
if not return_dict:
output = (logits,) + transformer_outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@auto_docstring
class MptForQuestionAnswering(MptPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.transformer = MptModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, 2)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
start_positions: Optional[torch.LongTensor] = None,
end_positions: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, QuestionAnsweringModelOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values.get_seq_length()`
(`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.transformer(
input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, the start/end positions may carry an extra dimension; squeeze it out
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"MptForCausalLM",
"MptModel",
"MptPreTrainedModel",
"MptForSequenceClassification",
"MptForTokenClassification",
"MptForQuestionAnswering",
]
| transformers/src/transformers/models/mpt/modeling_mpt.py/0 | {
"file_path": "transformers/src/transformers/models/mpt/modeling_mpt.py",
"repo_id": "transformers",
"token_count": 15312
} | 491 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Text/audio processor class for MusicGen
"""
from typing import Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
r"""
Constructs a MusicGen processor which wraps an EnCodec feature extractor and a T5 tokenizer into a single processor
class.
[`MusicgenProcessor`] offers all the functionalities of [`EncodecFeatureExtractor`] and [`T5Tokenizer`]. See
[`~MusicgenProcessor.__call__`] and [`~MusicgenProcessor.decode`] for more information.
Args:
feature_extractor (`EncodecFeatureExtractor`):
An instance of [`EncodecFeatureExtractor`]. The feature extractor is a required input.
tokenizer (`T5Tokenizer`):
An instance of [`T5Tokenizer`]. The tokenizer is a required input.
"""
feature_extractor_class = "EncodecFeatureExtractor"
tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
def __init__(self, feature_extractor, tokenizer):
super().__init__(feature_extractor, tokenizer)
self.current_processor = self.feature_extractor
self._in_target_context_manager = False
def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)
def __call__(self, *args, **kwargs):
"""
Forwards the `audio` argument to EncodecFeatureExtractor's [`~EncodecFeatureExtractor.__call__`] and the `text`
argument to [`~T5Tokenizer.__call__`]. Please refer to the docstring of the above two methods for more
information.
"""
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*args, **kwargs)
audio = kwargs.pop("audio", None)
sampling_rate = kwargs.pop("sampling_rate", None)
text = kwargs.pop("text", None)
if len(args) > 0:
audio = args[0]
args = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process.")
if text is not None:
inputs = self.tokenizer(text, **kwargs)
if audio is not None:
audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
if audio is None:
return inputs
elif text is None:
return audio_inputs
else:
inputs["input_values"] = audio_inputs["input_values"]
if "padding_mask" in audio_inputs:
inputs["padding_mask"] = audio_inputs["padding_mask"]
return inputs
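# Illustrative call of the processor defined above (not part of the original file); the
# checkpoint id is an assumption and the audio is random noise at MusicGen's 32 kHz rate.
#
#     import numpy as np
#     from transformers import AutoProcessor
#
#     processor = AutoProcessor.from_pretrained("facebook/musicgen-small")
#     audio = np.random.randn(32000).astype(np.float32)  # one second of mono audio
#     inputs = processor(text=["80s pop with punchy drums"], audio=audio, sampling_rate=32000,
#                        padding=True, return_tensors="pt")
#     # -> tokenizer outputs plus "input_values" (and "padding_mask" when padding was applied)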
def batch_decode(self, *args, **kwargs):
"""
This method is used to decode either batches of audio outputs from the MusicGen model, or batches of token ids
from the tokenizer. In the case of decoding token ids, this method forwards all its arguments to T5Tokenizer's
[`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information.
"""
audio_values = kwargs.pop("audio", None)
padding_mask = kwargs.pop("padding_mask", None)
if len(args) > 0:
audio_values = args[0]
args = args[1:]
if audio_values is not None:
return self._decode_audio(audio_values, padding_mask=padding_mask)
else:
return self.tokenizer.batch_decode(*args, **kwargs)
def _decode_audio(self, audio_values, padding_mask: Optional = None) -> list[np.ndarray]:
"""
This method strips any padding from the audio values to return a list of numpy audio arrays.
"""
audio_values = to_numpy(audio_values)
bsz, channels, seq_len = audio_values.shape
if padding_mask is None:
return list(audio_values)
padding_mask = to_numpy(padding_mask)
# match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
# token (so that the generated audio values are **not** treated as padded tokens)
difference = seq_len - padding_mask.shape[-1]
padding_value = 1 - self.feature_extractor.padding_value
padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)
audio_values = audio_values.tolist()
for i in range(bsz):
sliced_audio = np.asarray(audio_values[i])[
padding_mask[i][None, :] != self.feature_extractor.padding_value
]
audio_values[i] = sliced_audio.reshape(channels, -1)
return audio_values
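# Worked sketch of the stripping above, assuming the EncodecFeatureExtractor default
# padding_value of 0.0: a padding_mask row [1, 1, 1, 0, 0] is right-padded with 1s up to the
# generated length, so the three original samples and every generated sample are kept while
# the two encoder-side padding positions are dropped.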
__all__ = ["MusicgenProcessor"]
| transformers/src/transformers/models/musicgen/processing_musicgen.py/0 | {
"file_path": "transformers/src/transformers/models/musicgen/processing_musicgen.py",
"repo_id": "transformers",
"token_count": 2075
} | 492 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example for running:
0. Copy checkpoints to local storage
aws s3 cp --recursive s3://ai2-llm/checkpoints/OLMoE/olmoe-8x1b-newhp-newds-final-annealFrom1200000/step23842 /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842
1. Unshard your OLMoE checkpoint using https://github.com/allenai/OLMo/blob/7d63fe09d23cf23714da5aa633a44a90180195da/scripts/unshard.py
python OLMo/scripts/unshard.py /data/niklas/llm/checkpoints/23485/step954000 /data/niklas/llm/checkpoints/1b-954000-unsharded --model-only
python OLMo/scripts/unshard.py /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842 /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842-unsharded --model-only
2. Convert to transformers
rm -rf olmoe; mkdir olmoe; python /data/niklas/transformers/src/transformers/models/olmoe/convert_olmoe_weights_to_hf.py --input_dir /data/niklas/llm/checkpoints/olmoe-8x1b-newhp-newds-final-annealFrom1200000_step23842-unsharded --tokenizer_json_path /data/niklas/llm/checkpoints/olmoe-step1200000-unsharded/tokenizer.json --output_dir olmoe
3. Load model via:
```
from transformers import OlmoeForCausalLM, AutoTokenizer
import torch
model = OlmoeForCausalLM.from_pretrained("../transformers/olmoe", dtype=torch.bfloat16).cuda()
model = OlmoeForCausalLM.from_pretrained("../transformers/olmoe").cuda()
tokenizer = AutoTokenizer.from_pretrained("../transformers/olmoe")
inputs = tokenizer("Bitcoin is", return_tensors="pt")
inputs = {k: v.cuda() for k, v in inputs.items()}
out = model.generate(**inputs, max_length=64)
print(tokenizer.decode(out[0]))
# > # Bitcoin is a digital currency that is created and held electronically. No one controls it. Bitcoins aren’t printed, like dollars or euros – they’re produced by people and businesses running computers all around the world, using software that solves mathematical
# Or quick sanity check:
o = model(torch.tensor([[0, 1]]).cuda())
# If the checkpoint is not converted to BF16 but kept in FP32:
# > # Bitcoin is a digital currency that is not controlled by any central authority. It is a peer-to-peer payment system that allows users to send and receive payments from anywhere in the world. Bitcoin is also known as a cryptocurrency because it uses cryptography to secure transactions and prevent fraud.
```
Note: you need to be able to host the whole model in RAM to execute this script (even though the biggest versions
come in several checkpoints, each checkpoint contains a part of every weight of the model, so all of them need to be loaded in RAM).
Compare with OLMo codebase:
```
from olmo.model import OLMo
import torch
model = OLMo.from_checkpoint("/data/niklas/llm/checkpoints/olmoe-step1200000-unsharded-pt")
model = model.cuda()
model = model.to(torch.bfloat16)
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("../transformers/olmoe")
inputs = tokenizer("Bitcoin is", return_tensors="pt")
inputs = {k: v.cuda() for k, v in inputs.items()}
out = model.generate(**inputs)
print(tokenizer.decode(out[0][0][0]))
# Bitcoin is a digital currency that is created and held electronically. No one controls it. Bitcoins aren’t printed, like dollars or euros – they’re produced by people and businesses running computers all around the world, using software that solves mathematical problems. It’s the first example of a growing category of money
# Or quick sanity check:
o = model(torch.tensor([[0, 1]]).cuda())
```
"""
import argparse
import gc
import json
import os
import shutil
from pathlib import Path
import torch
import yaml
from tokenizers import Tokenizer
from transformers import OlmoeConfig, OlmoeForCausalLM
from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
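# Worked example for the helper above: compute_intermediate_size(2048) == 5632, since
# int(8 * 2048 / 3) = 5461 and the next multiple of 256 at or above 5461 is 5632.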
def read_json(path):
with open(path, "r") as f:
return json.load(f)
def write_json(text, path):
with open(path, "w") as f:
json.dump(text, f)
def write_model(model_path, input_base_path, tokenizer_path=None, safe_serialization=True, fix_eos_token_id=True):
os.makedirs(model_path, exist_ok=True)
tmp_model_path = os.path.join(model_path, "tmp")
os.makedirs(tmp_model_path, exist_ok=True)
config_path = Path(input_base_path) / "config.yaml"
olmoe_config = yaml.safe_load(config_path.read_text())["model"]
if fix_eos_token_id:
olmoe_config["eos_token_id"] = 50279
n_layers = olmoe_config["n_layers"]
n_heads = olmoe_config["n_heads"]
dim = olmoe_config["d_model"]
dims_per_head = dim // n_heads
base = 10000.0
inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
max_position_embeddings = olmoe_config["max_sequence_length"]
vocab_size = olmoe_config.get("embedding_size", olmoe_config["vocab_size"])
if olmoe_config.get("n_kv_heads", None) is not None:
num_key_value_heads = olmoe_config["n_kv_heads"] # for GQA / MQA
elif olmoe_config["multi_query_attention"]: # compatibility with other checkpoints
num_key_value_heads = 1
else:
num_key_value_heads = n_heads
print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
# Not sharded
loaded = torch.load(os.path.join(input_base_path, "model.pt"), map_location="cpu", weights_only=True)
param_count = 0
index_dict = {"weight_map": {}}
for layer_i in range(n_layers):
filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
fused_dims = [dim, dims_per_head * num_key_value_heads, dims_per_head * num_key_value_heads]
q_proj_weight, k_proj_weight, v_proj_weight = torch.split(
loaded[f"transformer.blocks.{layer_i}.att_proj.weight"], fused_dims, dim=0
)
state_dict = {
f"model.layers.{layer_i}.self_attn.q_proj.weight": q_proj_weight,
f"model.layers.{layer_i}.self_attn.k_proj.weight": k_proj_weight,
f"model.layers.{layer_i}.self_attn.v_proj.weight": v_proj_weight,
f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"transformer.blocks.{layer_i}.attn_out.weight"],
f"model.layers.{layer_i}.self_attn.q_norm.weight": loaded[f"transformer.blocks.{layer_i}.q_norm.weight"],
f"model.layers.{layer_i}.self_attn.k_norm.weight": loaded[f"transformer.blocks.{layer_i}.k_norm.weight"],
f"model.layers.{layer_i}.mlp.gate.weight": loaded[f"transformer.blocks.{layer_i}.ffn.router.layer.weight"],
f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"transformer.blocks.{layer_i}.attn_norm.weight"],
f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[
f"transformer.blocks.{layer_i}.ff_norm.weight"
],
}
num_experts = loaded[f"transformer.blocks.{layer_i}.ffn.router.layer.weight"].shape[0]
dim_per_expert = loaded[f"transformer.blocks.{layer_i}.ffn.experts.mlp.w1"].shape[0] // num_experts
for expert_i in range(num_experts):
state_dict[f"model.layers.{layer_i}.mlp.experts.{expert_i}.gate_proj.weight"] = loaded[
f"transformer.blocks.{layer_i}.ffn.experts.mlp.w1"
][dim_per_expert * expert_i : dim_per_expert * (expert_i + 1), :]
state_dict[f"model.layers.{layer_i}.mlp.experts.{expert_i}.up_proj.weight"] = loaded[
f"transformer.blocks.{layer_i}.ffn.experts.mlp.v1"
][dim_per_expert * expert_i : dim_per_expert * (expert_i + 1), :]
state_dict[f"model.layers.{layer_i}.mlp.experts.{expert_i}.down_proj.weight"] = loaded[
f"transformer.blocks.{layer_i}.ffn.experts.mlp.w2"
][dim_per_expert * expert_i : dim_per_expert * (expert_i + 1), :].T.contiguous()
state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
# Unsharded
state_dict = {
"model.embed_tokens.weight": loaded["transformer.wte.weight"],
"lm_head.weight": loaded["transformer.ff_out.weight"],
"model.norm.weight": loaded["transformer.ln_f.weight"],
}
for k, v in state_dict.items():
index_dict["weight_map"][k] = filename
param_count += v.numel()
torch.save(state_dict, os.path.join(tmp_model_path, filename))
# Write configs
index_dict["metadata"] = {"total_size": param_count * 2}
write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
config = OlmoeConfig(
vocab_size=vocab_size,
hidden_size=dim,
intermediate_size=dim_per_expert,
num_hidden_layers=n_layers,
num_attention_heads=n_heads,
num_key_value_heads=num_key_value_heads,
max_position_embeddings=max_position_embeddings,
pad_token_id=olmoe_config["pad_token_id"],
bos_token_id=None,
eos_token_id=olmoe_config["eos_token_id"],
tie_word_embeddings=olmoe_config["weight_tying"],
rope_theta=base,
clip_qkv=olmoe_config.get("clip_qkv"),
)
config.save_pretrained(tmp_model_path)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
if tokenizer_path is not None:
_write_tokenizer(model_path, config, tokenizer_path, fix_eos_token_id)
print("Loading the checkpoint in a OLMoE model.")
model = OlmoeForCausalLM.from_pretrained(tmp_model_path, dtype=torch.bfloat16)
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format.")
model.save_pretrained(model_path, safe_serialization=safe_serialization)
shutil.rmtree(tmp_model_path)
def _write_tokenizer(
output_path: Path, config: OlmoeConfig, input_tokenizer_path: Path, fix_eos_token_id: bool = True
) -> None:
print(f"Saving a {GPTNeoXTokenizerFast.__name__} to {output_path}.")
base_tokenizer = Tokenizer.from_file(str(input_tokenizer_path))
eos_token_id = config.eos_token_id if config.eos_token_id is not None else base_tokenizer.get_vocab_size() - 1
pad_token_id = config.pad_token_id if config.pad_token_id is not None else eos_token_id
if fix_eos_token_id and eos_token_id == 0:
# Fixing a bug in OLMo where eos token id was incorrectly set
print("Changing eos_token_id from 0 to 50279.")
eos_token_id = 50279
tokenizer = GPTNeoXTokenizerFast(
tokenizer_object=base_tokenizer,
eos_token=base_tokenizer.decode([eos_token_id], skip_special_tokens=False),
pad_token=base_tokenizer.decode([pad_token_id], skip_special_tokens=False),
unk_token=None,
bos_token=None,
)
tokenizer.save_pretrained(output_path)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_dir",
required=True,
help="Location of OLMoE weights, which contains config.yaml and model.pt.",
)
parser.add_argument(
"--tokenizer_json_path",
default=None,
help="Location of OLMoE tokenizer json file.",
)
parser.add_argument(
"--output_dir",
required=True,
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--no_fix_eos_token_id",
action="store_false",
dest="fix_eos_token_id",
help="If set, does not change eos token id from 0 to 50279 if it is 0. Changing 0 to 50279 is a bug fix, so use this option with care.",
)
parser.add_argument(
"--safe_serialization", type=bool, default=True, help="Whether or not to save using `safetensors`."
)
args = parser.parse_args()
write_model(
model_path=args.output_dir,
input_base_path=args.input_dir,
safe_serialization=args.safe_serialization,
tokenizer_path=args.tokenizer_json_path,
fix_eos_token_id=args.fix_eos_token_id,
)
if __name__ == "__main__":
main()
| transformers/src/transformers/models/olmoe/convert_olmoe_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/olmoe/convert_olmoe_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 5307
} | 493 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert OpenAI GPT checkpoint."""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig.from_json_file(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"Save PyTorch model to {pytorch_weights_dump_path}")
torch.save(model.state_dict(), pytorch_weights_dump_path)
print(f"Save configuration file to {pytorch_config_dump_path}")
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
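# Illustrative invocation (paths are placeholders, not from the original script):
#   python convert_openai_original_tf_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path /path/to/openai_tf_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir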
| transformers/src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/openai/convert_openai_original_tf_checkpoint_to_pytorch.py",
"repo_id": "transformers",
"token_count": 987
} | 494 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/ovis2/modular_ovis2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_ovis2.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from dataclasses import dataclass
from typing import Callable, Optional, Union
import torch
from torch import nn
from ...activations import ACT2FN
from ...generation import GenerationMixin
from ...integrations import use_kernel_forward_from_hub
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPast
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...utils import ModelOutput, auto_docstring, can_return_tuple
from ..auto import AutoModel
from .configuration_ovis2 import Ovis2Config, Ovis2VisionConfig
@dataclass
@auto_docstring(
custom_intro="""
Base class for Ovis2 outputs, with hidden states and attentions.
"""
)
class Ovis2ModelOutputWithPast(BaseModelOutputWithPast):
r"""
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Ovis2 causal language model (or autoregressive) outputs.
"""
)
class Ovis2CausalLMOutputWithPast(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
`(batch_size, num_heads, sequence_length, embed_size_per_head)`)
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size * num_patches, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@use_kernel_forward_from_hub("RMSNorm")
class Ovis2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Ovis2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class Ovis2VisionMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class Ovis2VisionEmbeddings(nn.Module):
def __init__(self, config: Ovis2VisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
padding="valid",
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches
self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
target_dtype = self.patch_embedding.weight.dtype
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
embeddings = patch_embeds.flatten(2).transpose(1, 2)
embeddings = self.rms_norm(embeddings)
embeddings = embeddings + self.position_embedding(self.position_ids)
return embeddings
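# Shape sketch for the embedding module above (illustrative): pixel_values of shape
# (num_images, num_channels, image_size, image_size) become patch embeddings of shape
# (num_images, (image_size // patch_size) ** 2, hidden_size), which are RMS-normalised and
# then summed with the learned position embeddings.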
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
if attention_mask is not None:
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
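# Shape sketch for the eager attention helper above (illustrative):
#   query/key/value: (batch, num_heads, seq_len, head_dim)
#   attn_weights:    (batch, num_heads, seq_len, seq_len) from query @ key^T * scaling (+ mask)
#   attn_output:     (batch, seq_len, num_heads, head_dim) after the final transpose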
class Ovis2VisionAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
class Ovis2MLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
return down_proj
class Ovis2Attention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_heads
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
f" {self.num_heads})."
)
self.scale = self.head_dim**-0.5
self.dropout = config.attention_dropout
self.is_causal = False
self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.qkv_bias)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
"""Input shape: Batch x Time x Channel"""
batch_size, seq_length, embed_dim = hidden_states.shape
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
queries,
keys,
values,
attention_mask,
is_causal=self.is_causal,
scaling=self.scale,
dropout=0.0 if not self.training else self.dropout,
)
attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
class Ovis2VisionEncoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Ovis2VisionConfig):
super().__init__()
self.attention = Ovis2Attention(config)
self.ffn = Ovis2MLP(config)
self.rms_norm1 = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
self.rms_norm2 = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor, torch.Tensor]:
norm_hidden_states = self.rms_norm1(hidden_states)
attn_output, attn_weights = self.attention(hidden_states=norm_hidden_states, attention_mask=attention_mask)
hidden_states = hidden_states + attn_output
norm_hidden_states = self.rms_norm2(hidden_states)
mlp_output = self.ffn(norm_hidden_states)
hidden_states = hidden_states + mlp_output
return (hidden_states, attn_weights) if output_attentions else (hidden_states, None)
class Ovis2VisionEncoder(nn.Module):
"""
Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
[`Ovis2VisionEncoderLayer`].
Args:
config: Ovis2VisionConfig
"""
def __init__(self, config: Ovis2VisionConfig):
super().__init__()
self.config = config
self.layers = nn.ModuleList([Ovis2VisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
# Ignore copy
@can_return_tuple
def forward(
self,
inputs_embeds,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> BaseModelOutput:
r"""
Args:
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
hidden_states = inputs_embeds
for encoder_layer in self.layers:
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_states,
attentions=all_attentions,
)
class Ovis2VisionTransformer(nn.Module):
def __init__(self, config: Ovis2VisionConfig):
super().__init__()
self.config = config
self.embeddings = Ovis2VisionEmbeddings(config)
self.encoder = Ovis2VisionEncoder(config)
self.rms_norm = Ovis2RMSNorm(config.hidden_size, config.rms_norm_eps)
self.gradient_checkpointing = False
@can_return_tuple
def forward(
self,
pixel_values,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
hidden_states = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
last_hidden_state = encoder_outputs[0]
last_hidden_state = self.rms_norm(last_hidden_state)
return BaseModelOutput(
last_hidden_state=last_hidden_state,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class Ovis2VisualEmbeddingTable(nn.Embedding):
def forward(self, visual_tokens: torch.Tensor) -> torch.Tensor:
if visual_tokens.dtype in [torch.int8, torch.int16, torch.int32, torch.int64, torch.long]:
return super().forward(visual_tokens)
return torch.matmul(visual_tokens, self.weight)
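# Illustrative behaviour of the embedding table above: integer token ids go through the usual
# nn.Embedding lookup, while float inputs of shape (..., vocab_size) are treated as soft token
# probabilities and mapped via probs @ weight, i.e. a probability-weighted mixture of embedding rows.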
class Ovis2PreTrainedModel(PreTrainedModel):
config: Ovis2Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Ovis2VisionAttention"]
_skip_keys_device_placement = "past_key_values"
_supports_cache_class = True
_supports_flash_attn = True
_supports_flex_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_attention_backend = True
def hard_softmax(logits: torch.Tensor, dim: int):
y_soft = logits.softmax(dim)
# Straight through.
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
return ret
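# hard_softmax is a straight-through argmax: the forward value is a one-hot vector while
# gradients flow through the underlying softmax. Minimal sketch (illustrative):
#
#     logits = torch.tensor([[1.0, 3.0, 2.0]], requires_grad=True)
#     out = hard_softmax(logits, dim=-1)            # forward value: tensor([[0., 1., 0.]])
#     (out * torch.arange(3.0)).sum().backward()    # gradients are taken through softmax(logits)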
class Ovis2VisionModel(Ovis2PreTrainedModel):
config: Ovis2VisionConfig
def __init__(self, config: Ovis2VisionConfig):
super().__init__(config)
self.config = config
self.transformer = Ovis2VisionTransformer(config)
self.num_visual_indicator_tokens = config.num_visual_indicator_tokens
self.vocab_size = config.vocab_size
self.head_linear = nn.Linear(
config.hidden_size * config.hidden_stride * config.hidden_stride,
self.vocab_size - self.num_visual_indicator_tokens,
bias=False,
)
self.head_norm = nn.LayerNorm(self.vocab_size - self.num_visual_indicator_tokens)
def forward(self, pixel_values: torch.FloatTensor) -> tuple[torch.Tensor, torch.Tensor]:
outputs = self.transformer(pixel_values)
last_hidden_state = outputs.last_hidden_state
if self.config.hidden_stride > 1:
num_images, seq_len, hidden_dim = last_hidden_state.shape
hidden_stride = self.config.hidden_stride
sqrt_l = int(math.sqrt(seq_len))
if sqrt_l * sqrt_l != seq_len:
raise ValueError("Token sequence length must be a perfect square")
pad_size = (hidden_stride - (sqrt_l % hidden_stride)) % hidden_stride
last_hidden_state = nn.functional.pad(last_hidden_state, (0, 0, 0, pad_size, 0, pad_size), "constant", 0)
sqrt_l += pad_size
last_hidden_state = last_hidden_state.reshape(
num_images, sqrt_l // hidden_stride, hidden_stride, sqrt_l // hidden_stride, hidden_stride, hidden_dim
)
last_hidden_state = last_hidden_state.permute(0, 1, 3, 2, 4, 5)
last_hidden_state = last_hidden_state.reshape(
num_images, -1, hidden_stride * hidden_stride * hidden_dim
) # (n, (sqrt_l//hs)^2, hs^2*d)
logits = self.head_linear(last_hidden_state)
logits = self.head_norm(logits)
if self.config.tokenize_function == "gumbel_argmax":
prob_token = nn.functional.gumbel_softmax(logits, dim=-1, hard=True)
elif self.config.tokenize_function == "st_argmax":
prob_token = hard_softmax(logits, dim=-1)
elif self.config.tokenize_function == "softmax":
prob_token = nn.functional.softmax(logits, dim=-1)
return prob_token
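# Shape walkthrough for the forward pass above (illustrative numbers): with 1024 patches
# (sqrt_l = 32) and hidden_stride = 2, neighbouring 2x2 patches are merged, giving
# (num_images, 256, 4 * hidden_dim) before the head; each of the 256 merged tokens is then
# projected and normalised into a distribution over the visual vocabulary.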
@auto_docstring(
custom_intro="""
The Ovis2 model which consists of a vision backbone and a language model, without a language modeling head.
"""
)
class Ovis2Model(Ovis2PreTrainedModel):
_checkpoint_conversion_mapping = {}
def __init__(self, config: Ovis2Config):
super().__init__(config)
self.vision_tower = Ovis2VisionModel(config.vision_config)
self.language_model = AutoModel.from_config(config.text_config)
self.visual_embeddings_table = Ovis2VisualEmbeddingTable(config.vision_config.vocab_size, config.hidden_size)
self.visual_vocab_size = config.vision_config.vocab_size
self.vocab_size = config.vocab_size
self.visual_indicator_token_ids = config.visual_indicator_token_ids
self.post_init()
def get_input_embeddings(self):
return self.language_model.get_input_embeddings()
def set_input_embeddings(self, value):
self.language_model.set_input_embeddings(value)
def set_decoder(self, decoder):
self.language_model = decoder
def get_decoder(self):
return self.language_model
def get_image_features(
self,
pixel_values: torch.FloatTensor,
) -> torch.FloatTensor:
"""
Obtains image features from the vision tower and maps them into the language embedding space via the visual embedding table.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, channels, height, width)`):
The tensors corresponding to the input images.
Returns:
image_features (`torch.Tensor`): Image feature tensor of shape `(num_images, image_length, embed_dim)`.
visual_indicator_features (`torch.Tensor`): Embeddings of the visual indicator tokens.
"""
image_features = self.vision_tower(pixel_values)
batch_size, img_seq_len, _ = image_features.shape
padding_tensor = torch.zeros(
(batch_size, img_seq_len, self.vision_tower.num_visual_indicator_tokens),
dtype=image_features.dtype,
device=image_features.device,
requires_grad=False,
layout=image_features.layout,
)
image_features = torch.cat([image_features, padding_tensor], dim=2)
image_features = self.visual_embeddings_table(image_features)
visual_indicator = torch.arange(
self.visual_vocab_size - self.vision_tower.num_visual_indicator_tokens,
self.visual_vocab_size,
dtype=torch.long,
).to(image_features.device)
visual_indicator_features = self.visual_embeddings_table(visual_indicator)
return image_features, visual_indicator_features
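# Note on the concatenation above (descriptive, not part of the original file): the vision tower
# emits probabilities over `vocab_size - num_visual_indicator_tokens` soft tokens, so zero columns
# are appended to span the full visual vocabulary before the soft embedding lookup; the indicator
# token embeddings are fetched separately and returned alongside the image features.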
def get_placeholder_mask(
self, input_ids: torch.LongTensor, inputs_embeds: torch.FloatTensor, image_features: torch.FloatTensor
):
"""
Obtains the multimodal placeholder mask from `input_ids` or `inputs_embeds`, and checks that the placeholder token count is
equal to the length of multimodal features. If the lengths are different, an error is raised.
"""
if input_ids is None:
special_image_mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(self.config.image_token_id, dtype=torch.long, device=inputs_embeds.device)
)
special_image_mask = special_image_mask.all(-1)
else:
special_image_mask = input_ids == self.config.image_token_id
n_image_tokens = special_image_mask.sum()
special_image_mask = special_image_mask.unsqueeze(-1).expand_as(inputs_embeds).to(inputs_embeds.device)
n_image_features = image_features.shape[0] * image_features.shape[1]
if inputs_embeds[special_image_mask].numel() != image_features.numel():
raise ValueError(
f"Image features and image tokens do not match: tokens: {n_image_tokens}, features {n_image_features}"
)
return special_image_mask
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, Ovis2ModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features, visual_indicator_features = self.get_image_features(pixel_values=pixel_values)
special_image_mask = self.get_placeholder_mask(
input_ids,
inputs_embeds=inputs_embeds,
image_features=image_features,
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
for i, visual_indicator_id in enumerate(self.visual_indicator_token_ids):
if input_ids is None:
mask = inputs_embeds == self.get_input_embeddings()(
torch.tensor(visual_indicator_id, dtype=torch.long, device=inputs_embeds.device)
)
mask = mask.all(-1)
else:
mask = (input_ids == visual_indicator_id).to(inputs_embeds.device)
if mask.any():
inputs_embeds[mask] = (
visual_indicator_features[i]
.expand_as(inputs_embeds[mask])
.to(inputs_embeds.device, inputs_embeds.dtype)
)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
return Ovis2ModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
@auto_docstring
class Ovis2ForConditionalGeneration(Ovis2PreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {}
_tied_weights_keys = ["lm_head.weight"]
def __init__(self, config: Ovis2Config):
super().__init__(config)
self.model = Ovis2Model(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def set_decoder(self, decoder):
self.model.set_decoder(decoder)
def get_decoder(self):
return self.model.get_decoder()
def get_image_features(self, pixel_values: torch.FloatTensor):
return self.model.get_image_features(pixel_values=pixel_values)
# Make modules available through conditional class for BC
@property
def language_model(self):
return self.model.language_model
@property
def vision_tower(self):
return self.model.vision_tower
@property
def multi_modal_projector(self):
raise AttributeError("Not needed for Ovis2")
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: torch.LongTensor = None,
pixel_values: torch.FloatTensor = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[list[torch.FloatTensor]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, Ovis2CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, Ovis2ForConditionalGeneration
>>> model = Ovis2ForConditionalGeneration.from_pretrained("thisisiron/Ovis2-2B-hf")
>>> processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-2B-hf")
>>> prompt = "<|im_start|>user\n<image>\nDescribe the image.<|im_end|>\n<|im_start|>assistant\n"
>>> url = "http://images.cocodataset.org/val2014/COCO_val2014_000000537955.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_new_tokens=15)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True)[0]
"user\n\nDescribe the image.\nassistant\nThe image features a brown dog standing on a wooden floor, looking up with"
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return Ovis2CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# Pixel values are only forwarded on the first (prefill) step. During cached decoding the
# input ids no longer contain the special image tokens, so pixel_values are left out.
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = ["Ovis2PreTrainedModel", "Ovis2Model", "Ovis2ForConditionalGeneration"]
| transformers/src/transformers/models/ovis2/modeling_ovis2.py/0 | {
"file_path": "transformers/src/transformers/models/ovis2/modeling_ovis2.py",
"repo_id": "transformers",
"token_count": 16210
} | 495 |
# coding=utf-8
# Copyright 2025 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast Image processor class for OwlViT"""
import warnings
from typing import TYPE_CHECKING, Optional, Union
from ...image_processing_utils_fast import BaseImageProcessorFast
from ...image_transforms import center_to_corners_format
from ...image_utils import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, PILImageResampling
from ...utils import TensorType, auto_docstring, is_torch_available, logging
if TYPE_CHECKING:
from .modeling_owlvit import OwlViTObjectDetectionOutput
if is_torch_available():
import torch
from .image_processing_owlvit import _scale_boxes, box_iou
logger = logging.get_logger(__name__)
@auto_docstring
class OwlViTImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"height": 768, "width": 768}
default_to_square = True
crop_size = {"height": 768, "width": 768}
do_resize = True
do_center_crop = False
do_rescale = True
do_normalize = None
do_convert_rgb = None
model_input_names = ["pixel_values"]
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process
def post_process(self, outputs, target_sizes):
"""
Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`OwlViTObjectDetectionOutput`]):
Raw outputs of the model.
target_sizes (`torch.Tensor` of shape `(batch_size, 2)`):
Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original
image size (before any data augmentation). For visualization, this should be the image size after data
augmentation, but before padding.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model.
"""
# TODO: (amy) add support for other frameworks
warnings.warn(
"`post_process` is deprecated and will be removed in v5 of Transformers, please use"
" `post_process_object_detection` instead, with `threshold=0.` for equivalent results.",
FutureWarning,
)
logits, boxes = outputs.logits, outputs.pred_boxes
if len(logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)
labels = probs.indices
# Convert to [x0, y0, x1, y1] format
boxes = center_to_corners_format(boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device)
boxes = boxes * scale_fct[:, None, :]
results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)]
return results
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_object_detection
def post_process_object_detection(
self,
outputs: "OwlViTObjectDetectionOutput",
threshold: float = 0.1,
target_sizes: Optional[Union[TensorType, list[tuple]]] = None,
):
"""
Converts the raw output of [`OwlViTForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`OwlViTObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.1):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "labels": Indexes of the classes predicted by the model on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
"""
batch_logits, batch_boxes = outputs.logits, outputs.pred_boxes
batch_size = len(batch_logits)
if target_sizes is not None and len(target_sizes) != batch_size:
raise ValueError("Make sure that you pass in as many target sizes as images")
# batch_logits of shape (batch_size, num_queries, num_classes)
batch_class_logits = torch.max(batch_logits, dim=-1)
batch_scores = torch.sigmoid(batch_class_logits.values)
batch_labels = batch_class_logits.indices
# Convert to [x0, y0, x1, y1] format
batch_boxes = center_to_corners_format(batch_boxes)
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
batch_boxes = _scale_boxes(batch_boxes, target_sizes)
results = []
for scores, labels, boxes in zip(batch_scores, batch_labels, batch_boxes):
keep = scores > threshold
scores = scores[keep]
labels = labels[keep]
boxes = boxes[keep]
results.append({"scores": scores, "labels": labels, "boxes": boxes})
return results
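# Illustrative end-to-end sketch (not part of the original file); the checkpoint id, image URL
# and text query are assumptions.
#
#     import torch, requests
#     from PIL import Image
#     from transformers import OwlViTProcessor, OwlViTForObjectDetection
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
#     image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
#     inputs = processor(text=[["a photo of a cat"]], images=image, return_tensors="pt")
#     with torch.no_grad():
#         outputs = model(**inputs)
#     sizes = torch.tensor([image.size[::-1]])  # (height, width)
#     results = processor.image_processor.post_process_object_detection(outputs, threshold=0.1, target_sizes=sizes)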
# Copied from transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.post_process_image_guided_detection
def post_process_image_guided_detection(self, outputs, threshold=0.0, nms_threshold=0.3, target_sizes=None):
"""
Converts the output of [`OwlViTForObjectDetection.image_guided_detection`] into the format expected by the COCO
api.
Args:
outputs ([`OwlViTImageGuidedObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.0):
Minimum confidence threshold to use to filter out predicted boxes.
nms_threshold (`float`, *optional*, defaults to 0.3):
IoU threshold for non-maximum suppression of overlapping boxes.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
`None`, predictions will not be rescaled.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image
in the batch as predicted by the model. All labels are set to None as
`OwlViTForObjectDetection.image_guided_detection` performs one-shot object detection.
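Example (a minimal usage sketch, assuming the `google/owlvit-base-patch16` checkpoint; here the query is an
image rather than a text prompt):
```python
>>> import torch
>>> import requests
>>> from PIL import Image
>>> from transformers import OwlViTProcessor, OwlViTForObjectDetection
>>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16")
>>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch16")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> query_url = "http://images.cocodataset.org/val2017/000000001675.jpg"
>>> query_image = Image.open(requests.get(query_url, stream=True).raw)
>>> inputs = processor(images=image, query_images=query_image, return_tensors="pt")
>>> with torch.no_grad():
...     outputs = model.image_guided_detection(**inputs)
>>> target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
>>> results = processor.image_processor.post_process_image_guided_detection(
...     outputs, threshold=0.9, nms_threshold=0.3, target_sizes=target_sizes
... )
>>> results[0]["labels"] is None  # one-shot detection: no class labels are predicted
True
```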
"""
logits, target_boxes = outputs.logits, outputs.target_pred_boxes
if target_sizes is not None and len(logits) != len(target_sizes):
raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits")
if target_sizes is not None and target_sizes.shape[1] != 2:
raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
probs = torch.max(logits, dim=-1)
scores = torch.sigmoid(probs.values)
# Convert to [x0, y0, x1, y1] format
target_boxes = center_to_corners_format(target_boxes)
# Apply non-maximum suppression (NMS)
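# Greedy suppression: boxes are visited in descending score order; any other box whose IoU with the current
# (higher-scored) box exceeds `nms_threshold` has its score zeroed, which excludes it from the final results.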
if nms_threshold < 1.0:
for idx in range(target_boxes.shape[0]):
for i in torch.argsort(-scores[idx]):
if not scores[idx][i]:
continue
ious = box_iou(target_boxes[idx][i, :].unsqueeze(0), target_boxes[idx])[0][0]
ious[i] = -1.0 # Mask self-IoU.
scores[idx][ious > nms_threshold] = 0.0
# Convert from relative [0, 1] to absolute [0, height] coordinates
if target_sizes is not None:
target_boxes = _scale_boxes(target_boxes, target_sizes)
# Compute box display alphas based on prediction scores
results = []
alphas = torch.zeros_like(scores)
for idx in range(target_boxes.shape[0]):
# Select scores for boxes matching the current query:
query_scores = scores[idx]
if not query_scores.nonzero().numel():
continue
# Apply threshold on scores before scaling
query_scores[query_scores < threshold] = 0.0
# Scale box alpha such that the best box for each query has alpha 1.0 and the worst box has alpha 0.1.
# All other boxes will either belong to a different query, or will not be shown.
max_score = torch.max(query_scores) + 1e-6
query_alphas = (query_scores - (max_score * 0.1)) / (max_score * 0.9)
query_alphas = torch.clip(query_alphas, 0.0, 1.0)
alphas[idx] = query_alphas
mask = alphas[idx] > 0
box_scores = alphas[idx][mask]
boxes = target_boxes[idx][mask]
results.append({"scores": box_scores, "labels": None, "boxes": boxes})
return results
__all__ = ["OwlViTImageProcessorFast"]
| transformers/src/transformers/models/owlvit/image_processing_owlvit_fast.py/0 | {
"file_path": "transformers/src/transformers/models/owlvit/image_processing_owlvit_fast.py",
"repo_id": "transformers",
"token_count": 4305
} | 496 |
# coding=utf-8
# Copyright 2021, Google and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PEGASUS model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class PegasusConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`PegasusModel`]. It is used to instantiate a
PEGASUS model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the PEGASUS
[google/pegasus-large](https://huggingface.co/google/pegasus-large) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 50265):
Vocabulary size of the PEGASUS model. Defines the number of different tokens that can be represented by the
`input_ids` passed when calling [`PegasusModel`] or [`TFPegasusModel`].
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the encoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
The LayerDrop probability for the decoder. See the [LayerDrop paper](https://huggingface.co/papers/1909.11556)
for more details.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by dividing by sqrt(d_model).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*, defaults to 1):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Example:
```python
>>> from transformers import PegasusConfig, PegasusModel
>>> # Initializing a PEGASUS google/pegasus-large style configuration
>>> configuration = PegasusConfig()
>>> # Initializing a model (with random weights) from the google/pegasus-large style configuration
>>> model = PegasusModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
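>>> # Illustrative addition: `attribute_map` aliases the generic names onto Pegasus-specific attributes
>>> configuration.num_attention_heads  # alias for encoder_attention_heads
16
>>> configuration.hidden_size  # alias for d_model
1024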
```"""
model_type = "pegasus"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__(
self,
vocab_size=50265,
max_position_embeddings=1024,
encoder_layers=12,
encoder_ffn_dim=4096,
encoder_attention_heads=16,
decoder_layers=12,
decoder_ffn_dim=4096,
decoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_layerdrop=0.0,
use_cache=True,
is_encoder_decoder=True,
activation_function="gelu",
d_model=1024,
dropout=0.1,
attention_dropout=0.0,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=0,
scale_embedding=False,
pad_token_id=0,
eos_token_id=1,
forced_eos_token_id=1,
**kwargs,
):
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
forced_eos_token_id=forced_eos_token_id,
**kwargs,
)
@property
def num_attention_heads(self) -> int:
return self.encoder_attention_heads
@property
def hidden_size(self) -> int:
return self.d_model
__all__ = ["PegasusConfig"]
| transformers/src/transformers/models/pegasus/configuration_pegasus.py/0 | {
"file_path": "transformers/src/transformers/models/pegasus/configuration_pegasus.py",
"repo_id": "transformers",
"token_count": 2890
} | 497 |
# coding=utf-8
# Copyright 2021 Deepmind and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Perceiver model."""
import abc
import math
from collections.abc import Mapping
from dataclasses import dataclass
from functools import reduce
from operator import __add__
from typing import Any, Callable, Optional, Union
import numpy as np
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import ModelOutput, auto_docstring, logging, torch_int
from .configuration_perceiver import PerceiverConfig
ModalitySizeType = Mapping[str, int]
PreprocessorOutputType = tuple[torch.Tensor, Optional[torch.Tensor], torch.Tensor]
PreprocessorType = Callable[..., PreprocessorOutputType]
PostprocessorType = Callable[..., Any]
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Base class for Perceiver base model's outputs, with potential hidden states, attentions and cross-attentions.
"""
)
class PerceiverModelOutput(ModelOutput):
r"""
logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
"""
logits: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Perceiver decoder outputs, with potential cross-attentions.
"""
)
class PerceiverDecoderOutput(ModelOutput):
r"""
logits (`torch.FloatTensor` of shape `(batch_size, num_labels)`):
Output of the basic decoder.
"""
logits: Optional[torch.FloatTensor] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Perceiver's masked language model outputs.
"""
)
class PerceiverMaskedLMOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Masked language modeling (MLM) loss.
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Perceiver's outputs of sequence/image classification models, optical flow and multimodal
autoencoding.
"""
)
class PerceiverClassifierOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
class PerceiverEmbeddings(nn.Module):
"""Construct the latent embeddings."""
def __init__(self, config):
super().__init__()
self.latents = nn.Parameter(torch.randn(config.num_latents, config.d_latents))
def forward(self, batch_size: int):
return self.latents.expand(batch_size, -1, -1) # Thanks, Phil Wang
class PerceiverSelfAttention(nn.Module):
"""Multi-headed {cross, self}-attention. Can be used both in the encoder as well as in the decoder."""
def __init__(
self,
config,
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
):
super().__init__()
self.num_heads = num_heads
# Q and K must have the same number of channels.
# Default to preserving Q's input's shape.
if qk_channels is None:
qk_channels = q_dim
# V's num_channels determines the shape of the output of QKV-attention.
# Default to the same number of channels used in the key-query operation.
if v_channels is None:
v_channels = qk_channels
if qk_channels % num_heads != 0:
raise ValueError(f"qk_channels ({qk_channels}) must be divisible by num_heads ({num_heads}).")
if v_channels % num_heads != 0:
raise ValueError(f"v_channels ({v_channels}) must be divisible by num_heads ({num_heads}).")
self.qk_channels = qk_channels
self.v_channels = v_channels
self.qk_channels_per_head = self.qk_channels // num_heads
self.v_channels_per_head = self.v_channels // num_heads
# Layer normalization
self.layernorm1 = nn.LayerNorm(q_dim)
self.layernorm2 = nn.LayerNorm(kv_dim) if is_cross_attention else nn.Identity()
# Projection matrices
self.query = nn.Linear(q_dim, qk_channels)
self.key = nn.Linear(kv_dim, qk_channels)
self.value = nn.Linear(kv_dim, v_channels)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, channels_per_head):
new_x_shape = x.size()[:-1] + (self.num_heads, channels_per_head)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
hidden_states = self.layernorm1(hidden_states)
inputs = self.layernorm2(inputs)
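# When this module is used as self-attention, `inputs` is None and `layernorm2` is nn.Identity, so the line
# above passes None through unchanged and the cross-attention branch below is skipped.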
# Project queries, keys and values to a common feature dimension. If this is instantiated as a cross-attention module,
# the keys and values come from the inputs; the attention mask needs to be such that the inputs' non-relevant tokens are not attended to.
is_cross_attention = inputs is not None
queries = self.query(hidden_states)
if is_cross_attention:
keys = self.key(inputs)
values = self.value(inputs)
attention_mask = inputs_mask
else:
keys = self.key(hidden_states)
values = self.value(hidden_states)
# Reshape channels for multi-head attention.
# We reshape from (batch_size, time, channels) to (batch_size, num_heads, time, channels per head)
queries = self.transpose_for_scores(queries, self.qk_channels_per_head)
keys = self.transpose_for_scores(keys, self.qk_channels_per_head)
values = self.transpose_for_scores(values, self.v_channels_per_head)
# Take the dot product between the queries and keys to get the raw attention scores.
attention_scores = torch.matmul(queries, keys.transpose(-1, -2))
batch_size, num_heads, seq_len, q_head_dim = queries.shape
_, _, _, v_head_dim = values.shape
hiddens = self.num_heads * v_head_dim
attention_scores = attention_scores / math.sqrt(q_head_dim)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in PerceiverModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, values)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (hiddens,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class PerceiverSelfOutput(nn.Module):
def __init__(self, config, input_channels, output_channels):
super().__init__()
self.dense = nn.Linear(input_channels, output_channels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
return hidden_states
class PerceiverAttention(nn.Module):
"""Attention module, including a dense block."""
def __init__(
self,
config,
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
use_query_residual=True,
):
super().__init__()
# MultiHead attention
if is_cross_attention and qk_channels is None:
if config.cross_attention_shape_for_attention == "q":
qk_channels = q_dim
elif config.cross_attention_shape_for_attention == "kv":
qk_channels = kv_dim
else:
raise ValueError(
f"Unknown value {config.cross_attention_shape_for_attention} for "
"cross_attention_shape_for_attention."
)
else:
if qk_channels is None:
qk_channels = q_dim
if v_channels is None:
v_channels = qk_channels
self.self = PerceiverSelfAttention(
config,
is_cross_attention=is_cross_attention,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=q_dim,
kv_dim=kv_dim,
)
# dense block
output_channels = None
if is_cross_attention:
output_channels = q_dim
else:
if output_channels is None:
output_channels = v_channels
self.output = PerceiverSelfOutput(config, input_channels=self.self.v_channels, output_channels=output_channels)
self.use_query_residual = use_query_residual
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
inputs,
inputs_mask,
output_attentions,
)
# Output projection
attention_output = self.output(self_outputs[0])
# Optionally include a residual to the original queries.
# Consider omitting the residual if the semantics of query and output
# are different, e.g. if queries are positions and outputs are pixels.
if self.use_query_residual:
attention_output = attention_output + hidden_states
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class PerceiverMLP(nn.Module):
"""A Transformer-style dense module to follow attention."""
def __init__(self, config, input_size, widening_factor):
super().__init__()
self.dense1 = nn.Linear(input_size, widening_factor * input_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.dense2 = nn.Linear(widening_factor * input_size, input_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense1(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.dense2(hidden_states)
return hidden_states
class PerceiverLayer(nn.Module):
def __init__(
self,
config,
is_cross_attention=False,
qk_channels=None,
v_channels=None,
num_heads=1,
q_dim=None,
kv_dim=None,
widening_factor=4,
use_query_residual=True,
):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = PerceiverAttention(
config,
is_cross_attention=is_cross_attention,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=q_dim,
kv_dim=kv_dim,
use_query_residual=use_query_residual,
)
self.layernorm = nn.LayerNorm(q_dim)
self.mlp = PerceiverMLP(config, input_size=q_dim, widening_factor=widening_factor)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
inputs,
inputs_mask,
output_attentions,
)
attention_output = attention_outputs[0]
outputs = attention_outputs[1:] # add attentions if we output attention weights
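# apply_chunking_to_forward optionally splits the work along `seq_len_dim` into chunks of
# `chunk_size_feed_forward` to reduce peak memory; with a chunk size of 0 it is equivalent to calling
# `feed_forward_chunk` on the full tensor.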
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
layer_output = layer_output + attention_output # residual connection
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
layer_output = self.layernorm(attention_output)
layer_output = self.mlp(layer_output)
return layer_output
class PerceiverEncoder(nn.Module):
"""The Perceiver Encoder: a scalable, fully attentional encoder."""
def __init__(self, config, kv_dim=None):
super().__init__()
self.config = config
# Check that we can use multihead-attention with these shapes.
if config.d_latents % config.num_self_attention_heads != 0:
raise ValueError(
f"num_z_channels ({config.d_latents}) must be divisible by"
f" num_self_attend_heads ({config.num_self_attention_heads})."
)
if config.d_latents % config.num_cross_attention_heads != 0:
raise ValueError(
f"num_z_channels ({config.d_latents}) must be divisible by"
f" num_cross_attend_heads ({config.num_cross_attention_heads})."
)
# Construct the cross attention layer.
self.cross_attention = PerceiverLayer(
config,
is_cross_attention=True,
qk_channels=config.qk_channels,
v_channels=config.v_channels,
num_heads=config.num_cross_attention_heads,
q_dim=config.d_latents,
kv_dim=kv_dim,
widening_factor=config.cross_attention_widening_factor,
use_query_residual=config.use_query_residual,
)
# Construct a single block of self-attention layers.
# We get deeper architectures by applying this block more than once.
self_attention_layers = []
for _ in range(config.num_self_attends_per_block):
layer = PerceiverLayer(
config,
is_cross_attention=False,
qk_channels=config.qk_channels,
v_channels=config.v_channels,
num_heads=config.num_self_attention_heads,
q_dim=config.d_latents,
kv_dim=config.d_latents,
widening_factor=config.self_attention_widening_factor,
)
self_attention_layers.append(layer)
self.self_attends = nn.ModuleList(self_attention_layers)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs: Optional[torch.FloatTensor] = None,
inputs_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
output_hidden_states: Optional[bool] = False,
return_dict: Optional[bool] = True,
) -> Union[tuple, BaseModelOutputWithCrossAttentions]:
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions else None
# Apply the cross-attention between the latents (hidden_states) and inputs:
layer_outputs = self.cross_attention(
hidden_states,
attention_mask=attention_mask,
head_mask=None,
inputs=inputs,
inputs_mask=inputs_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_cross_attentions = all_cross_attentions + (layer_outputs[1],)
# Apply the block of self-attention layers `num_blocks` times:
for _ in range(self.config.num_blocks):
for i, layer_module in enumerate(self.self_attends):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
layer_outputs = layer_module(
hidden_states,
attention_mask=attention_mask,
head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithCrossAttentions(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
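# Minimal usage sketch for the encoder above (illustrative only; assumes the default PerceiverConfig values
# num_latents=256, d_latents=1280 and a dummy input of shape (batch, seq_len, d_model)):
#
#     config = PerceiverConfig()
#     latents = PerceiverEmbeddings(config)(batch_size=2)          # (2, 256, 1280)
#     encoder = PerceiverEncoder(config, kv_dim=config.d_model)
#     inputs = torch.randn(2, 128, config.d_model)
#     out = encoder(latents, inputs=inputs, return_dict=True)
#     out.last_hidden_state.shape                                  # torch.Size([2, 256, 1280])
#
# The self-attention tower only ever operates on the latents, so its cost is independent of the input
# sequence length; only the single cross-attention layer scales with `seq_len`.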
@auto_docstring
class PerceiverPreTrainedModel(PreTrainedModel):
config: PerceiverConfig
base_model_prefix = "perceiver"
main_input_name = "inputs"
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, (nn.Linear, nn.Conv2d)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif hasattr(module, "latents"):
module.latents.data.normal_(mean=0.0, std=self.config.initializer_range)
elif hasattr(module, "position_embeddings") and isinstance(module, PerceiverTrainablePositionEncoding):
module.position_embeddings.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.ParameterDict):
for modality in module:
module[modality].data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@auto_docstring(
custom_intro="""
The Perceiver: a scalable, fully attentional architecture.
<Tip>
Note that it's possible to fine-tune Perceiver on higher resolution images than the ones it has been trained on, by
setting `interpolate_pos_encoding` to `True` in the forward of the model. This will interpolate the pre-trained
position embeddings to the higher resolution.
</Tip>
"""
)
class PerceiverModel(PerceiverPreTrainedModel):
def __init__(
self,
config,
decoder: Optional["PerceiverAbstractDecoder"] = None,
input_preprocessor: PreprocessorType = None,
output_postprocessor: PostprocessorType = None,
):
r"""
decoder (`PerceiverDecoder`, *optional*):
Decoder module that transforms latent representations into task predictions.
input_preprocessor (`PreprocessorType`, *optional*):
Preprocessor that encodes raw inputs into tensors for the model.
output_postprocessor (`PostprocessorType`, *optional*):
Postprocessor that transforms model outputs into final predictions.
"""
super().__init__(config)
self.config = config
self.input_preprocessor = input_preprocessor
self.output_postprocessor = output_postprocessor
self.embeddings = PerceiverEmbeddings(config)
self.encoder = PerceiverEncoder(
config, kv_dim=input_preprocessor.num_channels if input_preprocessor is not None else config.d_model
)
self.decoder = decoder
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.latents
def set_input_embeddings(self, value):
self.embeddings.latents = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@auto_docstring
def forward(
self,
inputs: torch.FloatTensor,
attention_mask: Optional[torch.FloatTensor] = None,
subsampled_output_points: Optional[dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, PerceiverModelOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
subsampled_output_points (`dict[str, torch.Tensor]`, *optional*):
Dictionary of tensors used as queries for the decoder. The decoder maps these queries to the latent
representation of the model. Used for subsampled decoding, e.g. when only decoding certain image patches.
Examples:
```python
>>> from transformers import PerceiverConfig, PerceiverTokenizer, PerceiverImageProcessor, PerceiverModel
>>> from transformers.models.perceiver.modeling_perceiver import (
... PerceiverTextPreprocessor,
... PerceiverImagePreprocessor,
... PerceiverClassificationDecoder,
... )
>>> import torch
>>> import requests
>>> from PIL import Image
>>> # EXAMPLE 1: using the Perceiver to classify texts
>>> # - we define a TextPreprocessor, which can be used to embed tokens
>>> # - we define a ClassificationDecoder, which can be used to decode the
>>> # final hidden states of the latents to classification logits
>>> # using trainable position embeddings
>>> config = PerceiverConfig()
>>> preprocessor = PerceiverTextPreprocessor(config)
>>> decoder = PerceiverClassificationDecoder(
... config,
... num_channels=config.d_latents,
... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
... use_query_residual=True,
... )
>>> model = PerceiverModel(config, input_preprocessor=preprocessor, decoder=decoder)
>>> # you can then do a forward pass as follows:
>>> tokenizer = PerceiverTokenizer()
>>> text = "hello world"
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
>>> with torch.no_grad():
... outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
>>> # to train, one can train the model using standard cross-entropy:
>>> criterion = torch.nn.CrossEntropyLoss()
>>> labels = torch.tensor([1])
>>> loss = criterion(logits, labels)
>>> # EXAMPLE 2: using the Perceiver to classify images
>>> # - we define an ImagePreprocessor, which can be used to embed images
>>> config = PerceiverConfig(image_size=224)
>>> preprocessor = PerceiverImagePreprocessor(
... config,
... prep_type="conv1x1",
... spatial_downsample=1,
... out_channels=256,
... position_encoding_type="trainable",
... concat_or_add_pos="concat",
... project_pos_dim=256,
... trainable_position_encoding_kwargs=dict(
... num_channels=256,
... index_dims=config.image_size**2,
... ),
... )
>>> model = PerceiverModel(
... config,
... input_preprocessor=preprocessor,
... decoder=PerceiverClassificationDecoder(
... config,
... num_channels=config.d_latents,
... trainable_position_encoding_kwargs=dict(num_channels=config.d_latents, index_dims=1),
... use_query_residual=True,
... ),
... )
>>> # you can then do a forward pass as follows:
>>> image_processor = PerceiverImageProcessor()
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = image_processor(image, return_tensors="pt").pixel_values
>>> with torch.no_grad():
... outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
>>> # to train, one can train the model using standard cross-entropy:
>>> criterion = torch.nn.CrossEntropyLoss()
>>> labels = torch.tensor([1])
>>> loss = criterion(logits, labels)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.input_preprocessor is not None:
inputs, modality_sizes, inputs_without_pos = self.input_preprocessor(
inputs, interpolate_pos_encoding=interpolate_pos_encoding
)
else:
modality_sizes = None
inputs_without_pos = None
if inputs.size()[-1] != self.config.d_model:
raise ValueError(
f"Last dimension of the inputs: {inputs.size()[-1]} doesn't correspond to config.d_model:"
f" {self.config.d_model}. Make sure to set config.d_model appropriately."
)
batch_size, seq_length, _ = inputs.size()
device = inputs.device
# If no attention mask is provided, make them all ones
if attention_mask is None:
attention_mask = torch.ones((batch_size, seq_length), device=device)
# Make the attention mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
extended_attention_mask = self.invert_attention_mask(attention_mask)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_blocks x num_heads]
# and head_mask is converted to shape [num_blocks x batch x num_heads x N x N]
head_mask = self.get_head_mask(head_mask, self.config.num_blocks * self.config.num_self_attends_per_block)
embedding_output = self.embeddings(batch_size=batch_size)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=None,
head_mask=head_mask,
inputs=inputs,
inputs_mask=extended_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
logits = None
if self.decoder:
if subsampled_output_points is not None:
output_modality_sizes = {
"audio": subsampled_output_points["audio"].shape[0],
"image": subsampled_output_points["image"].shape[0],
"label": 1,
}
else:
output_modality_sizes = modality_sizes
decoder_query = self.decoder.decoder_query(
inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_output_points
)
decoder_outputs = self.decoder(
decoder_query,
z=sequence_output,
query_mask=extended_attention_mask,
output_attentions=output_attentions,
)
logits = decoder_outputs.logits
# add cross-attentions of decoder
if output_attentions and decoder_outputs.cross_attentions is not None:
if return_dict:
encoder_outputs.cross_attentions = (
encoder_outputs.cross_attentions + decoder_outputs.cross_attentions
)
else:
encoder_outputs = encoder_outputs + decoder_outputs.cross_attentions
if self.output_postprocessor:
logits = self.output_postprocessor(logits, modality_sizes=output_modality_sizes)
if not return_dict:
if logits is not None:
return (logits, sequence_output) + encoder_outputs[1:]
else:
return (sequence_output,) + encoder_outputs[1:]
return PerceiverModelOutput(
logits=logits,
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for masked language modeling.
"""
)
class PerceiverForMaskedLM(PerceiverPreTrainedModel):
def __init__(self, config: PerceiverConfig):
super().__init__(config)
text_preprocessor = PerceiverTextPreprocessor(config)
trainable_position_encoding_kwargs_decoder = {
"num_channels": text_preprocessor.num_channels,
"index_dims": config.max_position_embeddings,
}
self.perceiver = PerceiverModel(
config,
input_preprocessor=text_preprocessor,
decoder=PerceiverBasicDecoder(
config,
output_num_channels=config.d_latents,
output_index_dims=config.max_position_embeddings, # we need to define the seq_len of the inputs beforehand
num_channels=text_preprocessor.num_channels,
qk_channels=8 * 32,
v_channels=text_preprocessor.num_channels,
num_heads=8,
use_query_residual=False,
final_project=False,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
),
)
self.embedding_decoder = PerceiverEmbeddingDecoder(config)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
input_ids: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverMaskedLMOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
Examples:
```python
>>> from transformers import AutoTokenizer, PerceiverForMaskedLM
>>> import torch
>>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
>>> model = PerceiverForMaskedLM.from_pretrained("deepmind/language-perceiver")
>>> # training
>>> text = "This is an incomplete sentence where some words are missing."
>>> inputs = tokenizer(text, padding="max_length", return_tensors="pt")
>>> # mask " missing."
>>> inputs["input_ids"][0, 52:61] = tokenizer.mask_token_id
>>> labels = tokenizer(text, padding="max_length", return_tensors="pt").input_ids
>>> outputs = model(**inputs, labels=labels)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
19.87
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2048, 262]
>>> # inference
>>> text = "This is an incomplete sentence where some words are missing."
>>> encoding = tokenizer(text, padding="max_length", return_tensors="pt")
>>> # mask bytes corresponding to " missing.". Note that the model performs much better if the masked span starts with a space.
>>> encoding["input_ids"][0, 52:61] = tokenizer.mask_token_id
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2048, 262]
>>> masked_tokens_predictions = logits[0, 52:61].argmax(dim=-1).tolist()
>>> tokenizer.decode(masked_tokens_predictions)
' missing.'
```"""
if inputs is not None and input_ids is not None:
raise ValueError("You cannot use both `inputs` and `input_ids`")
elif inputs is None and input_ids is not None:
inputs = input_ids
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.embedding_decoder(
outputs.logits if return_dict else outputs[0], embedding_layer=self.perceiver.input_preprocessor.embeddings
)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return PerceiverMaskedLMOutput(
loss=masked_lm_loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for text classification.
"""
)
class PerceiverForSequenceClassification(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(
config,
input_preprocessor=PerceiverTextPreprocessor(config),
decoder=PerceiverClassificationDecoder(
config,
num_channels=config.d_latents,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
use_query_residual=True,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
input_ids: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the classification/regression loss. Indices should be in `[0, ..., config.num_labels -
1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels >
1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoTokenizer, PerceiverForSequenceClassification
>>> tokenizer = AutoTokenizer.from_pretrained("deepmind/language-perceiver")
>>> model = PerceiverForSequenceClassification.from_pretrained("deepmind/language-perceiver")
>>> text = "hello world"
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 2]
```"""
if inputs is not None and input_ids is not None:
raise ValueError("You cannot use both `inputs` and `input_ids`")
elif inputs is None and input_ids is not None:
inputs = input_ids
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for image classification, for tasks such as ImageNet.
This model uses learned position embeddings. In other words, this model is not given any privileged information about
the structure of images. As shown in the paper, this model can achieve a top-1 accuracy of 72.7 on ImageNet.
[`PerceiverForImageClassificationLearned`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="conv1x1"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
"""
)
class PerceiverForImageClassificationLearned(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
trainable_position_encoding_kwargs_preprocessor = {"num_channels": 256, "index_dims": config.image_size**2}
trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(
config,
input_preprocessor=PerceiverImagePreprocessor(
config,
prep_type="conv1x1",
spatial_downsample=1,
out_channels=256,
position_encoding_type="trainable",
concat_or_add_pos="concat",
project_pos_dim=256,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_preprocessor,
),
decoder=PerceiverClassificationDecoder(
config,
num_channels=config.d_latents,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
use_query_residual=True,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
pixel_values: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationLearned
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-learned")
>>> model = PerceiverForImageClassificationLearned.from_pretrained("deepmind/vision-perceiver-learned")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError("You cannot use both `inputs` and `pixel_values`")
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
interpolate_pos_encoding=interpolate_pos_encoding,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for image classification, for tasks such as ImageNet.
This model uses fixed 2D Fourier position embeddings. As shown in the paper, this model can achieve a top-1 accuracy of
79.0 on ImageNet, and 84.5 when pre-trained on a large-scale dataset (i.e. JFT).
[`PerceiverForImageClassificationFourier`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="pixels"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
"""
)
class PerceiverForImageClassificationFourier(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {
"concat_pos": True,
"max_resolution": (224, 224),
"num_bands": 64,
"sine_only": False,
}
trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(
config,
input_preprocessor=PerceiverImagePreprocessor(
config,
prep_type="pixels",
spatial_downsample=1,
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
),
decoder=PerceiverClassificationDecoder(
config,
num_channels=config.d_latents,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
use_query_residual=True,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
pixel_values: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationFourier
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-fourier")
>>> model = PerceiverForImageClassificationFourier.from_pretrained("deepmind/vision-perceiver-fourier")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError("You cannot use both `inputs` and `pixel_values`")
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for image classification, for tasks such as ImageNet.
This model uses a 2D conv+maxpool preprocessing network. As shown in the paper, this model can achieve a top-1 accuracy
of 82.1 on ImageNet.
[`PerceiverForImageClassificationConvProcessing`] uses [`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`]
(with `prep_type="conv"`) to preprocess the input images, and
[`~models.perceiver.modeling_perceiver.PerceiverClassificationDecoder`] to decode the latent representation of
[`PerceiverModel`] into classification logits.
"""
)
class PerceiverForImageClassificationConvProcessing(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {
"concat_pos": True,
"max_resolution": (56, 56),
"num_bands": 64,
"sine_only": False,
}
trainable_position_encoding_kwargs_decoder = {"num_channels": config.d_latents, "index_dims": 1}
self.num_labels = config.num_labels
self.perceiver = PerceiverModel(
config,
input_preprocessor=PerceiverImagePreprocessor(
config,
prep_type="conv",
spatial_downsample=1,
position_encoding_type="fourier",
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
),
decoder=PerceiverClassificationDecoder(
config,
num_channels=config.d_latents,
trainable_position_encoding_kwargs=trainable_position_encoding_kwargs_decoder,
use_query_residual=True,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
pixel_values: Optional[torch.Tensor] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, PerceiverForImageClassificationConvProcessing
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image_processor = AutoImageProcessor.from_pretrained("deepmind/vision-perceiver-conv")
>>> model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")
>>> inputs = image_processor(images=image, return_tensors="pt").pixel_values
>>> outputs = model(inputs=inputs)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 1000]
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_class_idx = logits.argmax(-1).item()
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
Predicted class: tabby, tabby cat
```"""
if inputs is not None and pixel_values is not None:
raise ValueError("You cannot use both `inputs` and `pixel_values`")
elif inputs is None and pixel_values is not None:
inputs = pixel_values
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for optical flow, for tasks such as Sintel and KITTI. [`PerceiverForOpticalFlow`] uses
[`~models.perceiver.modeling_perceiver.PerceiverImagePreprocessor`] (with *prep_type="patches"*) to preprocess the
input images, and [`~models.perceiver.modeling_perceiver.PerceiverOpticalFlowDecoder`] to decode the latent
representation of [`PerceiverModel`].
As input, one concatenates 2 subsequent frames along the channel dimension and extracts a 3 x 3 patch around each pixel
(leading to 3 x 3 x 3 x 2 = 54 values for each pixel). Fixed Fourier position encodings are used to encode the position
of each pixel in the patch. Next, one applies the Perceiver encoder. To decode, one queries the latent representation
using the same encoding used for the input.
"""
)
class PerceiverForOpticalFlow(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {
"num_bands": 64,
"max_resolution": config.train_size,
"sine_only": False,
"concat_pos": True,
}
fourier_position_encoding_kwargs_decoder = {
"concat_pos": True,
"max_resolution": config.train_size,
"num_bands": 64,
"sine_only": False,
}
image_preprocessor = PerceiverImagePreprocessor(
config,
prep_type="patches",
spatial_downsample=1,
conv_after_patching=True,
conv_after_patching_in_channels=54,
temporal_downsample=2,
position_encoding_type="fourier",
# position_encoding_kwargs
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
)
self.perceiver = PerceiverModel(
config,
input_preprocessor=image_preprocessor,
decoder=PerceiverOpticalFlowDecoder(
config,
num_channels=image_preprocessor.num_channels,
output_image_shape=config.train_size,
rescale_factor=100.0,
# decoder kwargs
use_query_residual=False,
output_num_channels=2,
# We query the decoder using the first frame features
# rather than a standard decoder position encoding.
position_encoding_type="fourier",
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the optical flow loss. Note that training with labels is not yet supported.
Examples:
```python
>>> from transformers import PerceiverForOpticalFlow
>>> import torch
>>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")
>>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
>>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
>>> # patches have shape (batch_size, num_frames, num_channels, height, width)
>>> # the authors train on resolutions of 368 x 496
>>> patches = torch.randn(1, 2, 27, 368, 496)
>>> outputs = model(inputs=patches)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 368, 496, 2]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError("Optical flow training is not yet supported")
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.
[`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to
preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to
preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad
each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies
the Perceiver encoder.
[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of
[`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are
created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is
computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent
representation. This is determined by the subsampled indices for each modality, which can be provided as additional
input to the forward pass of [`PerceiverForMultimodalAutoencoding`].
[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different
modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention
is performed with the latent representation of [`PerceiverModel`].
Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPostprocessor`] is used to turn this tensor into an
actual video. It first splits up the output into the different modalities, and then applies the respective
postprocessor for each modality.
Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the
"label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.
"""
)
class PerceiverForMultimodalAutoencoding(PerceiverPreTrainedModel):
def __init__(self, config: PerceiverConfig):
super().__init__(config)
n_audio_samples = config.num_frames * config.audio_samples_per_frame
input_preprocessor = PerceiverMultimodalPreprocessor(
min_padding_size=4,
modalities={
"audio": PerceiverAudioPreprocessor(
config,
position_encoding_type="fourier",
fourier_position_encoding_kwargs={
"num_bands": 192,
"max_resolution": (n_audio_samples,),
"sine_only": False,
"concat_pos": True,
},
prep_type="patches",
samples_per_patch=config.samples_per_patch,
),
"image": PerceiverImagePreprocessor(
config,
position_encoding_type="fourier",
fourier_position_encoding_kwargs={
"num_bands": 32,
"max_resolution": (config.num_frames, config.image_size, config.image_size),
"sine_only": False,
"concat_pos": True,
},
prep_type="patches",
spatial_downsample=4,
temporal_downsample=1,
),
"label": PerceiverOneHotPreprocessor(config),
},
mask_probs={"image": 0.0, "audio": 0.0, "label": 1.0},
)
image_decoder = PerceiverBasicVideoAutoencodingDecoder(
config,
# Autoencoding, don't pass inputs to the queries.
concat_preprocessed_input=False,
output_shape=config.output_shape,
output_num_channels=config.output_num_channels,
use_query_residual=False,
position_encoding_only=True,
position_encoding_type="fourier",
fourier_position_encoding_kwargs={
"num_bands": 32,
"max_resolution": (config.num_frames, config.image_size, config.image_size),
"sine_only": False,
"concat_pos": True,
},
)
decoder = PerceiverMultimodalDecoder(
config,
# Autoencoding, don't pass inputs to the queries.
concat_preprocessed_input=False,
# Modality-specific decoders are used ONLY to generate queries.
# All modalities are decoded together using a unified decoder.
modalities={
"audio": PerceiverBasicDecoder(
config,
# Autoencoding, don't pass inputs to the queries.
concat_preprocessed_input=False,
output_index_dims=(n_audio_samples // config.samples_per_patch,),
output_num_channels=config.output_num_channels,
use_query_residual=False,
position_encoding_only=True,
position_encoding_type="fourier",
fourier_position_encoding_kwargs={
"num_bands": 192,
"max_resolution": (n_audio_samples,),
"sine_only": False,
"concat_pos": True,
},
),
"image": image_decoder,
"label": PerceiverClassificationDecoder(
config,
# Autoencoding, don't pass inputs to the queries.
concat_preprocessed_input=False,
use_query_residual=False,
position_encoding_only=True,
position_encoding_type="trainable",
trainable_position_encoding_kwargs={
"num_channels": config._label_trainable_num_channels,
"index_dims": 1,
},
),
},
num_outputs=None,
output_num_channels=config.output_num_channels,
use_query_residual=False,
)
output_postprocessor = PerceiverMultimodalPostprocessor(
modalities={
"audio": PerceiverAudioPostprocessor(config, in_channels=config.output_num_channels),
"image": PerceiverProjectionPostprocessor(in_channels=config.output_num_channels, out_channels=3),
"label": PerceiverClassificationPostprocessor(config, in_channels=config.output_num_channels),
}
)
self.perceiver = PerceiverModel(
config,
input_preprocessor=input_preprocessor,
decoder=decoder,
output_postprocessor=output_postprocessor,
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
subsampled_output_points: Optional[dict[str, torch.Tensor]] = None,
head_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
subsampled_output_points (`dict[str, torch.Tensor]`, *optional*):
Dictionary of tensors used as queries for the decoder. The decoder maps these queries to the latent
representation of the model. Used for subsampled decoding, e.g. when only decoding certain image patches.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import PerceiverForMultimodalAutoencoding
>>> import torch
>>> import numpy as np
>>> # create multimodal inputs
>>> images = torch.randn((1, 16, 3, 224, 224))
>>> audio = torch.randn((1, 30720, 1))
>>> inputs = dict(image=images, audio=audio, label=torch.zeros((images.shape[0], 700)))
>>> model = PerceiverForMultimodalAutoencoding.from_pretrained("deepmind/multimodal-perceiver")
>>> # in the Perceiver IO paper, videos are auto-encoded in chunks
>>> # each chunk subsamples different index dimensions of the image and audio modality decoder queries
>>> nchunks = 128
>>> image_chunk_size = np.prod((16, 224, 224)) // nchunks
>>> audio_chunk_size = audio.shape[1] // model.config.samples_per_patch // nchunks
>>> # process the first chunk
>>> chunk_idx = 0
>>> subsampling = {
... "image": torch.arange(image_chunk_size * chunk_idx, image_chunk_size * (chunk_idx + 1)),
... "audio": torch.arange(audio_chunk_size * chunk_idx, audio_chunk_size * (chunk_idx + 1)),
... "label": None,
... }
>>> outputs = model(inputs=inputs, subsampled_output_points=subsampling)
>>> logits = outputs.logits
>>> list(logits["audio"].shape)
[1, 240]
>>> list(logits["image"].shape)
[1, 6272, 3]
>>> list(logits["label"].shape)
[1, 700]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError("Multimodal autoencoding training is not yet supported")
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
subsampled_output_points=subsampled_output_points,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
# Below: position encodings
def build_position_encoding(
position_encoding_type,
out_channels=None,
project_pos_dim=-1,
trainable_position_encoding_kwargs=None,
fourier_position_encoding_kwargs=None,
):
"""
Builds the position encoding.
Args:
out_channels (`int`, *optional*):
The number of channels of the position encodings.
project_pos_dim (`int`, *optional*, defaults to -1):
If greater than 0, project the position encodings to this dimension.
"""
if position_encoding_type == "trainable":
if not trainable_position_encoding_kwargs:
raise ValueError("Make sure to pass trainable_position_encoding_kwargs")
output_pos_enc = PerceiverTrainablePositionEncoding(**trainable_position_encoding_kwargs)
elif position_encoding_type == "fourier":
# We don't use the index_dims argument, as this is only known during the forward pass
if not fourier_position_encoding_kwargs:
raise ValueError("Make sure to pass fourier_position_encoding_kwargs")
output_pos_enc = PerceiverFourierPositionEncoding(**fourier_position_encoding_kwargs)
else:
raise ValueError(f"Unknown position encoding type: {position_encoding_type}.")
# Optionally, project the position encoding to a target dimension:
positions_projection = nn.Linear(out_channels, project_pos_dim) if project_pos_dim > 0 else nn.Identity()
return output_pos_enc, positions_projection
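# Minimal usage sketch (hypothetical helper, not used by the library): builds a Fourier position
# encoding over a 224 x 224 grid with the same kind of kwargs dictionary the model classes above
# pass in. No projection is requested, so the second return value is an `nn.Identity`.
def _build_position_encoding_sketch():
    pos_enc, positions_projection = build_position_encoding(
        position_encoding_type="fourier",
        fourier_position_encoding_kwargs={
            "num_bands": 64,
            "max_resolution": (224, 224),
            "concat_pos": True,
            "sine_only": False,
        },
    )
    # 64 bands * 2 dims * 2 (sin + cos) + 2 raw coordinates = 258 channels per position
    return pos_enc.output_size(), positions_projection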
# Below: Perceiver decoders
class PerceiverAbstractDecoder(nn.Module, metaclass=abc.ABCMeta):
"""Perceiver abstract decoder."""
@abc.abstractmethod
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
raise NotImplementedError
@property
@abc.abstractmethod
def num_query_channels(self):
raise NotImplementedError
@abc.abstractmethod
def forward(self, query, z, query_mask=None):
raise NotImplementedError
class PerceiverProjectionDecoder(PerceiverAbstractDecoder):
"""
Baseline projection decoder (no cross-attention).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config):
super().__init__()
self.classifier = nn.Linear(config.d_latents, config.num_labels)
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return None
def forward(
self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
) -> torch.FloatTensor:
# (batch_size, num_latents, d_latents) -> (batch_size, d_latents)
z = torch.mean(z, dim=1)
# (batch_size, d_latents) -> (batch_size, config.num_labels)
logits = self.classifier(z)
return logits
class PerceiverBasicDecoder(PerceiverAbstractDecoder):
"""
Cross-attention-based decoder. This class can be used to decode the final hidden states of the latents using a
cross-attention operation, in which the latents produce keys and values.
The shape of the output of this class depends on how one defines the output queries (also called decoder queries).
Args:
config ([*PerceiverConfig*]):
Model configuration.
output_num_channels (`int`, *optional*):
The number of channels in the output. Will only be used in case *final_project* is set to `True`.
position_encoding_type (`str`, *optional*, defaults to "trainable"):
The type of position encoding to use. Can be either "trainable", "fourier", or "none".
output_index_dims (`int`, *optional*):
The number of dimensions of the output queries. Ignored if 'position_encoding_type' == 'none'.
num_channels (`int`, *optional*, defaults to 128):
The number of channels of the decoder queries. Ignored if 'position_encoding_type' == 'none'.
qk_channels (`int`, *optional*):
The number of channels of the queries and keys in the cross-attention layer.
v_channels (`int`, *optional*):
The number of channels of the values in the cross-attention layer.
num_heads (`int`, *optional*, defaults to 1):
The number of attention heads in the cross-attention layer.
widening_factor (`int`, *optional*, defaults to 1):
The widening factor of the cross-attention layer.
use_query_residual (`bool`, *optional*, defaults to `False`):
Whether to use a residual connection between the query and the output of the cross-attention layer.
concat_preprocessed_input (`bool`, *optional*, defaults to `False`):
Whether to concatenate the preprocessed input to the query.
final_project (`bool`, *optional*, defaults to `True`):
Whether to project the output of the cross-attention layer to a target dimension.
position_encoding_only (`bool`, *optional*, defaults to `False`):
Whether to only use this class to define output queries.
"""
def __init__(
self,
config: PerceiverConfig,
output_num_channels: int,
position_encoding_type: Optional[str] = "trainable",
# The following 2 arguments are ignored if position_encoding_type == 'none':
output_index_dims: Optional[int] = None,
num_channels: Optional[int] = 128,
subsampled_index_dims: Optional[int] = None,
qk_channels: Optional[int] = None,
v_channels: Optional[int] = None,
num_heads: Optional[int] = 1,
widening_factor: Optional[int] = 1,
use_query_residual: Optional[bool] = False,
concat_preprocessed_input: Optional[bool] = False,
final_project: Optional[bool] = True,
position_encoding_only: Optional[bool] = False,
**position_encoding_kwargs,
) -> None:
super().__init__()
self.output_num_channels = output_num_channels
# If `none`, the decoder will not construct any position encodings.
# You should construct your own when querying the decoder.
self.output_position_encodings = None
self.position_encoding_type = position_encoding_type
self.position_encoding_kwargs = position_encoding_kwargs
if position_encoding_type != "none":
self.output_position_encodings, self.positions_projection = build_position_encoding(
position_encoding_type=position_encoding_type, **position_encoding_kwargs
)
self.output_index_dims = output_index_dims
self.num_channels = num_channels
if subsampled_index_dims is None:
subsampled_index_dims = output_index_dims
self.subsampled_index_dims = subsampled_index_dims
self.concat_preprocessed_input = concat_preprocessed_input
self.final_project = final_project
self.position_encoding_only = position_encoding_only
# For multimodal autoencoding, the decoder cross-attention and final layer are not needed;
# in that case, position_encoding_only is set to True.
if not self.position_encoding_only:
self.decoding_cross_attention = PerceiverLayer(
config,
is_cross_attention=True,
qk_channels=qk_channels,
v_channels=v_channels,
num_heads=num_heads,
q_dim=num_channels,
kv_dim=config.d_latents,
widening_factor=widening_factor,
use_query_residual=use_query_residual,
)
self.final_layer = nn.Linear(num_channels, output_num_channels) if final_project else nn.Identity()
@property
def num_query_channels(self) -> int:
if self.position_encoding_type == "none": # Queries come from elsewhere
raise ValueError(
"You cannot calculate number of decoder query channels when position_encoding_type is set to none"
)
if self.position_encoding_only:
if "project_pos_dim" in self.position_encoding_kwargs:
return self.position_encoding_kwargs["project_pos_dim"]
return self.output_position_encodings.output_size()
if self.final_project:
return self.output_num_channels
return self.num_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
if self.position_encoding_type == "none": # Queries come from elsewhere
raise ValueError("You cannot construct decoder queries when position_encoding_type is set to none")
if subsampled_points is not None:
# subsampled_points are the indices as if the inputs were flattened;
# since the inputs are not actually flattened, we use unravel_index
# to get the indices for the unflattened array
# unravel_index returns a tuple (x_idx, y_idx, ...)
# stack to get the [n, d] tensor of coordinates
indices = [torch.from_numpy(x) for x in np.unravel_index(subsampled_points.cpu(), self.output_index_dims)]
pos = torch.stack(indices, dim=1)
batch_size = inputs.shape[0]
# Map these coordinates to [-1, 1]
pos = -1 + 2 * pos / torch.tensor(self.output_index_dims)[None, :]
pos = torch.broadcast_to(pos[None], [batch_size, pos.shape[0], pos.shape[1]])
# Construct the position encoding.
if self.position_encoding_type == "trainable":
pos_emb = self.output_position_encodings(batch_size)
elif self.position_encoding_type == "fourier":
pos_emb = self.output_position_encodings(
self.output_index_dims, batch_size=batch_size, device=inputs.device, dtype=inputs.dtype, pos=pos
)
# Optionally project them to a target dimension.
pos_emb = self.positions_projection(pos_emb)
pos_emb = torch.reshape(pos_emb, [pos_emb.shape[0], -1, pos_emb.shape[-1]])
else:
batch_size = inputs.shape[0]
index_dims = inputs.shape[2:]
# Construct the position encoding.
if self.position_encoding_type == "trainable":
pos_emb = self.output_position_encodings(batch_size)
elif self.position_encoding_type == "fourier":
pos_emb = self.output_position_encodings(
index_dims, batch_size, device=inputs.device, dtype=inputs.dtype
)
# Optionally project them to a target dimension.
pos_emb = self.positions_projection(pos_emb)
if self.concat_preprocessed_input:
if inputs_without_pos is None:
raise ValueError("Value is required for inputs_without_pos if concat_preprocessed_input is True")
pos_emb = torch.cat([inputs_without_pos, pos_emb], dim=-1)
return pos_emb
def forward(
self,
query: torch.Tensor,
z: torch.FloatTensor,
query_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> PerceiverDecoderOutput:
# Cross-attention decoding.
# key, value: B x N x K; query: B x M x K
# Attention maps -> B x N x M
# Output -> B x M x K
cross_attentions = () if output_attentions else None
layer_outputs = self.decoding_cross_attention(
query,
attention_mask=query_mask,
head_mask=None,
inputs=z,
inputs_mask=None,
output_attentions=output_attentions,
)
output = layer_outputs[0]
if output_attentions:
cross_attentions = cross_attentions + (layer_outputs[1],)
logits = self.final_layer(output)
return PerceiverDecoderOutput(logits=logits, cross_attentions=cross_attentions)
class PerceiverClassificationDecoder(PerceiverAbstractDecoder):
"""
Cross-attention based classification decoder. Light-weight wrapper of [`PerceiverBasicDecoder`] for logit output.
Turns the output of the Perceiver encoder, which is of shape (batch_size, num_latents, d_latents), into a tensor of
shape (batch_size, num_labels). The queries are of shape (batch_size, 1, num_labels).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config, **decoder_kwargs):
super().__init__()
self.num_labels = config.num_labels
self.decoder = PerceiverBasicDecoder(
config,
output_num_channels=self.num_labels,
output_index_dims=1, # Predict a single logit array.
**decoder_kwargs,
)
@property
def num_query_channels(self) -> int:
return self.decoder.num_query_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(
inputs, modality_sizes, inputs_without_pos, subsampled_points=subsampled_points
)
def forward(
self,
query: torch.Tensor,
z: torch.FloatTensor,
query_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> PerceiverDecoderOutput:
decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
# B x 1 x num_classes -> B x num_classes
logits = decoder_outputs.logits[:, 0, :]
return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
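# Minimal usage sketch (hypothetical helper, assuming small config values): the classification
# decoder builds a single trainable query per example and cross-attends into the latents to produce
# one logit vector per example, mirroring how the classification models above wire it up.
def _classification_decoder_sketch():
    config = PerceiverConfig(d_latents=256, num_latents=64, num_labels=10)
    decoder = PerceiverClassificationDecoder(
        config,
        num_channels=config.d_latents,
        trainable_position_encoding_kwargs={"num_channels": config.d_latents, "index_dims": 1},
        use_query_residual=True,
    )
    latents = torch.randn(2, config.num_latents, config.d_latents)
    query = decoder.decoder_query(inputs=latents)  # (2, 1, 256): one trainable query per example
    logits = decoder(query, z=latents).logits
    return logits.shape  # (2, 10)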
class PerceiverOpticalFlowDecoder(PerceiverAbstractDecoder):
"""Cross-attention based optical flow decoder."""
def __init__(self, config, output_image_shape, output_num_channels=2, rescale_factor=100.0, **decoder_kwargs):
super().__init__()
self.output_image_shape = output_image_shape
self.output_num_channels = output_num_channels
self.rescale_factor = rescale_factor
self.decoder = PerceiverBasicDecoder(config, output_num_channels=output_num_channels, **decoder_kwargs)
@property
def num_query_channels(self) -> int:
return self.decoder.num_query_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
if subsampled_points is not None:
raise ValueError("FlowDecoder doesn't support subsampling yet.")
return inputs
def forward(
self,
query: torch.Tensor,
z: torch.FloatTensor,
query_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> PerceiverDecoderOutput:
decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
preds = decoder_outputs.logits
# Output flow and rescale.
preds /= self.rescale_factor
preds = preds.reshape([preds.shape[0]] + list(self.output_image_shape) + [preds.shape[-1]])
return PerceiverDecoderOutput(logits=preds, cross_attentions=decoder_outputs.cross_attentions)
class PerceiverBasicVideoAutoencodingDecoder(PerceiverAbstractDecoder):
"""
Cross-attention based video-autoencoding decoder. Light-weight wrapper of [*PerceiverBasicDecoder*] with video
reshaping logic.
Args:
config ([*PerceiverConfig*]):
Model configuration.
output_shape (`list[int]`):
Shape of the output as (batch_size, num_frames, height, width), excluding the channel dimension.
position_encoding_type (`str`):
The type of position encoding to use. Can be either "trainable", "fourier", or "none".
"""
def __init__(
self, config: PerceiverConfig, output_shape: list[int], position_encoding_type: str, **decoder_kwargs
) -> None:
super().__init__()
if len(output_shape) != 4: # B, T, H, W
raise ValueError(f"Expected rank 4 output_shape, got {output_shape}.")
# Build the decoder components:
self.output_shape = output_shape
self.output_num_channels = decoder_kwargs["output_num_channels"]
self.decoder = PerceiverBasicDecoder(
config,
output_index_dims=self.output_shape[1:4], # T*H*W
position_encoding_type=position_encoding_type,
**decoder_kwargs,
)
@property
def num_query_channels(self) -> int:
return self.decoder.num_query_channels
def decoder_query(self, inputs, modality_sizes=None, inputs_without_pos=None, subsampled_points=None):
return self.decoder.decoder_query(
inputs,
modality_sizes=modality_sizes,
inputs_without_pos=inputs_without_pos,
subsampled_points=subsampled_points,
)
def forward(
self, query: torch.Tensor, z: torch.FloatTensor, query_mask: Optional[torch.FloatTensor] = None
) -> PerceiverDecoderOutput:
decoder_outputs = self.decoder(query, z)
logits = decoder_outputs.logits
logits = torch.reshape(logits, self.output_shape + [logits.shape[-1]])
return PerceiverDecoderOutput(logits=logits, cross_attentions=decoder_outputs.cross_attentions)
def restructure(modality_sizes: ModalitySizeType, inputs: torch.Tensor) -> Mapping[str, torch.Tensor]:
"""
Partitions a [B, N, C] tensor into tensors for each modality.
Args:
modality_sizes
dict specifying the size of the modality
inputs:
input tensor
Returns:
dict mapping name of modality to its associated tensor.
"""
outputs = {}
index = 0
# Apply a predictable ordering to the modalities
for modality in sorted(modality_sizes.keys()):
size = modality_sizes[modality]
inp = inputs[:, index : index + size]
index += size
outputs[modality] = inp
return outputs
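# Minimal usage sketch (hypothetical helper): partitions a flat multimodal tensor back into its
# modalities. Note that modalities are laid out, and therefore sliced, in sorted-key order.
def _restructure_sketch():
    inputs = torch.randn(2, 16, 32)  # "audio" occupies the first 6 positions, "image" the next 10
    parts = restructure(modality_sizes={"image": 10, "audio": 6}, inputs=inputs)
    return parts["audio"].shape, parts["image"].shape  # (2, 6, 32), (2, 10, 32)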
class PerceiverMultimodalDecoder(PerceiverAbstractDecoder):
"""
Multimodal decoding by composing uni-modal decoders. The *modalities* argument of the constructor is a dictionary
mapping modality name to the decoder of that modality. That decoder will be used to construct queries for that
modality. Modality-specific queries are padded with trainable modality-specific parameters, after which they are
concatenated along the time dimension.
Next, there is a shared cross attention operation across all modalities.
Args:
config ([*PerceiverConfig*]):
Model configuration.
modalities (`dict[str, PerceiverAbstractDecoder]`):
Dictionary mapping modality name to the decoder of that modality.
num_outputs (`int`):
The number of outputs of the decoder.
output_num_channels (`int`):
The number of channels in the output.
min_padding_size (`int`, *optional*, defaults to 2):
The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
channels across all modalities plus min_padding_size.
subsampled_index_dims (`dict[str, PerceiverAbstractDecoder]`, *optional*):
Dictionary mapping modality name to the subsampled index dimensions to use for the decoder query of that
modality.
"""
def __init__(
self,
config: PerceiverConfig,
modalities: dict[str, PerceiverAbstractDecoder],
num_outputs: int,
output_num_channels: int,
min_padding_size: Optional[int] = 2,
subsampled_index_dims: Optional[dict[str, PerceiverAbstractDecoder]] = None,
**decoder_kwargs,
) -> None:
super().__init__()
self.modalities = nn.ModuleDict(modalities)
self.subsampled_index_dims = subsampled_index_dims
self.min_padding_size = min_padding_size
self.output_num_channels = output_num_channels
self.num_outputs = num_outputs
self.decoder = PerceiverBasicDecoder(
config,
output_index_dims=(num_outputs,),
output_num_channels=output_num_channels,
position_encoding_type="none",
num_channels=self.num_query_channels,
**decoder_kwargs,
)
self.padding = nn.ParameterDict(
{
modality: nn.Parameter(torch.randn(1, self.num_query_channels - decoder.num_query_channels))
for modality, decoder in modalities.items()
}
)
@property
def num_query_channels(self) -> int:
max_channel_size = max(decoder.num_query_channels for _, decoder in self.modalities.items())
common_channel_size = max_channel_size + self.min_padding_size
return common_channel_size
def decoder_query(self, inputs, modality_sizes, inputs_without_pos=None, subsampled_points=None):
# Partition the flat inputs among the different modalities
inputs = restructure(modality_sizes, inputs)
# Obtain modality-specific decoders' queries
subsampled_points = subsampled_points or {}
decoder_queries = {}
for modality, decoder in self.modalities.items():
# Get input_without_pos for this modality if it exists.
input_without_pos = None
if inputs_without_pos is not None:
input_without_pos = inputs_without_pos.get(modality, None)
query = decoder.decoder_query(
inputs=inputs[modality],
modality_sizes=None,
inputs_without_pos=input_without_pos,
subsampled_points=subsampled_points.get(modality, None),
)
decoder_queries[modality] = query
# Pad all queries with trainable position encodings to make them have the same channels
def embed(modality, x):
x = torch.reshape(x, [x.shape[0], np.prod(x.shape[1:-1]), x.shape[-1]])
pos = self.padding[modality]
pos = torch.broadcast_to(pos, [x.shape[0], x.shape[1], self.num_query_channels - x.shape[2]])
return torch.cat([x, pos], dim=2)
# Apply a predictable ordering to the modalities
return torch.cat(
[embed(modality, decoder_queries[modality]) for modality in sorted(self.modalities.keys())], dim=1
)
def forward(
self,
query: torch.Tensor,
z: torch.FloatTensor,
query_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> torch.Tensor:
# Shared cross-attention decoding across the concatenated modality queries
decoder_outputs = self.decoder(query, z, output_attentions=output_attentions)
return decoder_outputs
# Below: IO pre- and post-processor classes for Perceiver.
def space_to_depth(frames: torch.Tensor, temporal_block_size: int = 1, spatial_block_size: int = 1) -> torch.Tensor:
"""
Space to depth transform. Rearranges blocks of spatial data into depth (channels).
This function assumes a channels-first input, but places the channels last after the transformation.
Based on https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/15.
"""
if len(frames.shape) == 4:
batch_size, num_channels, height, width = frames.shape
# split up dimensions (height by spatial_block_size, width by spatial_block_size)
frames = frames.view(
batch_size,
num_channels,
height // spatial_block_size,
spatial_block_size,
width // spatial_block_size,
spatial_block_size,
)
# move blocks to last dimension: (batch_size, H//bs, W//bs, bs, bs, C)
frames = frames.permute(0, 2, 4, 3, 5, 1).contiguous()
# concatenate blocks along channel dimension: (batch_size, H//bs, W//bs, bs*bs*C)
frames = frames.view(
batch_size,
height // spatial_block_size,
width // spatial_block_size,
(spatial_block_size**2) * num_channels,
)
return frames
elif len(frames.shape) == 5:
batch_size, time, num_channels, height, width = frames.shape
# split up dimensions (time by temporal_block_size, height by spatial_block_size, width by spatial_block_size)
frames = frames.view(
batch_size,
time // temporal_block_size,
temporal_block_size,
num_channels,
height // spatial_block_size,
spatial_block_size,
width // spatial_block_size,
spatial_block_size,
)
# move blocks to last dimension: (batch_size, T//ts, H//bs, W//bs, ts, bs, bs, C)
frames = frames.permute(0, 1, 4, 6, 2, 5, 7, 3).contiguous()
# concatenate blocks along channel dimension: (batch_size, T//ts, H//bs, W//bs, ts*bs*bs*C)
frames = frames.view(
batch_size,
time // temporal_block_size,
height // spatial_block_size,
width // spatial_block_size,
temporal_block_size * (spatial_block_size**2) * num_channels,
)
return frames
else:
raise ValueError(
"Frames should be of rank 4 (batch, channels, height, width)"
" or rank 5 (batch, time, channels, height, width)"
)
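# Minimal usage sketch (hypothetical helper): mirrors the optical-flow preprocessing above, where two
# 27-channel frames are stacked along the channel dimension by using a temporal block size of 2.
def _space_to_depth_sketch():
    frames = torch.randn(1, 2, 27, 368, 496)  # (batch, time, channels, height, width)
    patches = space_to_depth(frames, temporal_block_size=2, spatial_block_size=1)
    # (1, 1, 368, 496, 54): time is folded into the (now last) channel dimension, 2 * 27 = 54
    return patches.shape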
class Conv2dSamePadding(nn.Conv2d):
"""
Conv2d layer with padding="same" support. Source:
https://gist.github.com/sumanmichael/4de9dee93f972d47c80c4ade8e149ea6
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.zero_pad_2d = nn.ZeroPad2d(
reduce(__add__, [(k // 2 + (k - 2 * (k // 2)) - 1, k // 2) for k in self.kernel_size[::-1]])
)
def forward(self, input):
return self._conv_forward(self.zero_pad_2d(input), self.weight, self.bias)
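# Minimal usage sketch (hypothetical helper): with TensorFlow-style "same" padding, a stride-2
# convolution halves the spatial resolution regardless of the kernel size.
def _conv2d_same_padding_sketch():
    conv = Conv2dSamePadding(in_channels=3, out_channels=64, kernel_size=7, stride=2, bias=False)
    out = conv(torch.randn(1, 3, 224, 224))
    return out.shape  # (1, 64, 112, 112)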
class Conv2DDownsample(nn.Module):
"""Downsamples 4x by applying a 2D convolution and doing max pooling."""
def __init__(
self,
num_layers: int = 1,
in_channels: int = 3,
out_channels: int = 64,
use_batchnorm: bool = True,
):
"""
Constructs a Conv2DDownsample model.
Args:
in_channels (`int`, *optional*, defaults to 3):
The number of input channels.
out_channels (`int`, *optional*, defaults to 64):
The number of conv output channels.
use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batchnorm.
"""
super().__init__()
self.conv = Conv2dSamePadding(
in_channels=in_channels, out_channels=out_channels, kernel_size=7, stride=2, bias=False
)
self.batchnorm = nn.BatchNorm2d(num_features=out_channels) if use_batchnorm else nn.Identity()
self.relu = nn.ReLU()
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2)
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
out = self.conv(inputs)
out = self.batchnorm(out)
out = self.relu(out)
out = self.max_pool(out)
return out
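# Minimal usage sketch (hypothetical helper): the 7x7 stride-2 "same"-padded convolution halves the
# resolution (224 -> 112) and the unpadded 3x3 stride-2 max pool reduces it further (112 -> 55).
def _conv2d_downsample_sketch():
    downsample = Conv2DDownsample(in_channels=3, out_channels=64)
    out = downsample(torch.randn(1, 3, 224, 224))
    return out.shape  # (1, 64, 55, 55)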
def generate_fourier_features(pos, num_bands, max_resolution=(224, 224), concat_pos=True, sine_only=False):
"""
Generate a Fourier frequency position encoding with linear spacing.
Args:
pos (`torch.LongTensor` of shape `(batch_size, sequence_length, dim)`):
The Tensor containing the position of n points in d dimensional space.
num_bands (`int`):
The number of frequency bands (K) to use.
max_resolution (`tuple[int]`, *optional*, defaults to (224, 224)):
The maximum resolution (i.e. the number of pixels per dim). A tuple representing resolution for each dimension.
concat_pos (`bool`, *optional*, defaults to `True`):
Whether to concatenate the input position encoding to the Fourier features.
sine_only (`bool`, *optional*, defaults to `False`):
Whether to use a single phase (sin) or two (sin/cos) for each frequency band.
Returns:
`torch.FloatTensor` of shape `(batch_size, sequence_length, n_channels)`: The Fourier position embeddings. If
`concat_pos` is `True` and `sine_only` is `False`, output dimensions are ordered as: [dim_1, dim_2, ..., dim_d,
sin(pi*f_1*dim_1), ..., sin(pi*f_K*dim_1), ..., sin(pi*f_1*dim_d), ..., sin(pi*f_K*dim_d), cos(pi*f_1*dim_1),
..., cos(pi*f_K*dim_1), ..., cos(pi*f_1*dim_d), ..., cos(pi*f_K*dim_d)], where dim_i is pos[:, i] and f_k is the
kth frequency band.
"""
batch_size = pos.shape[0]
min_freq = 1.0
# Nyquist frequency at the target resolution:
freq_bands = torch.stack(
[torch.linspace(start=min_freq, end=res / 2, steps=num_bands) for res in max_resolution], dim=0
)
# Get frequency bands for each spatial dimension.
# Output is size [n, d * num_bands]
per_pos_features = pos[0, :, :][:, :, None] * freq_bands[None, :, :]
per_pos_features = torch.reshape(per_pos_features, [-1, np.prod(per_pos_features.shape[1:])])
if sine_only:
# Output is size [n, d * num_bands]
per_pos_features = torch.sin(np.pi * (per_pos_features))
else:
# Output is size [n, 2 * d * num_bands]
per_pos_features = torch.cat(
[torch.sin(np.pi * per_pos_features), torch.cos(np.pi * per_pos_features)], dim=-1
)
# Concatenate the raw input positions.
if concat_pos:
# Adds d bands to the encoding.
per_pos_features = torch.cat([pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1)
return per_pos_features
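# Minimal usage sketch (hypothetical helper): 16 two-dimensional positions with 64 bands per
# dimension yield 2 dims * 64 bands * 2 (sin + cos) + 2 raw coordinates = 258 channels per position.
def _generate_fourier_features_sketch():
    pos = torch.rand(1, 16, 2) * 2 - 1  # (batch_size, sequence_length, dim), coordinates in [-1, 1]
    features = generate_fourier_features(pos, num_bands=64, max_resolution=(224, 224))
    return features.shape  # (1, 16, 258)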
def build_linear_positions(index_dims, output_range=(-1.0, 1.0)):
"""
Generate an array of position indices for an N-D input array.
Args:
index_dims (`list[int]`):
The shape of the index dimensions of the input array.
output_range (`tuple[float]`, *optional*, defaults to `(-1.0, 1.0)`):
The min and max values taken by each input index dimension.
Returns:
`torch.FloatTensor` of shape `(index_dims[0], index_dims[1], .., index_dims[-1], N)`.
"""
def _linspace(n_xels_per_dim):
return torch.linspace(start=output_range[0], end=output_range[1], steps=n_xels_per_dim, dtype=torch.float32)
dim_ranges = [_linspace(n_xels_per_dim) for n_xels_per_dim in index_dims]
array_index_grid = meshgrid(*dim_ranges, indexing="ij")
return torch.stack(array_index_grid, dim=-1)
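# Minimal usage sketch (hypothetical helper): a 3 x 3 grid of 2-D coordinates, each coordinate
# linearly spaced over the default output range [-1, 1].
def _build_linear_positions_sketch():
    grid = build_linear_positions((3, 3))
    return grid.shape  # (3, 3, 2)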
class PerceiverAbstractPositionEncoding(nn.Module, metaclass=abc.ABCMeta):
"""Perceiver abstract position encoding."""
@property
@abc.abstractmethod
def num_dimensions(self) -> int:
raise NotImplementedError
@abc.abstractmethod
def output_size(self, *args, **kwargs) -> int:
raise NotImplementedError
@abc.abstractmethod
def forward(self, batch_size, pos):
raise NotImplementedError
class PerceiverTrainablePositionEncoding(PerceiverAbstractPositionEncoding):
"""Trainable position encoding."""
def __init__(self, index_dims, num_channels=128):
super().__init__()
self._num_channels = num_channels
self._index_dims = index_dims
index_dim = np.prod(index_dims)
self.position_embeddings = nn.Parameter(torch.randn(index_dim, num_channels))
@property
def num_dimensions(self) -> int:
if isinstance(self._index_dims, int):
return 1
return len(self._index_dims)
def output_size(self, *args, **kwargs) -> int:
return self._num_channels
def interpolate_pos_encoding(self, position_embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
num_positions = position_embeddings.shape[0]
new_height = new_width = torch_int(num_positions**0.5)
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and height == new_height and width == new_width:
return position_embeddings
position_embeddings = position_embeddings.reshape(1, new_height, new_width, self._num_channels).permute(
0, 3, 1, 2
)
position_embeddings = nn.functional.interpolate(
position_embeddings,
size=(height, width),
mode="bicubic",
align_corners=False,
)
position_embeddings = position_embeddings.reshape(1, self._num_channels, -1).permute(0, 2, 1).squeeze(0)
return position_embeddings
def forward(
self, batch_size: int, interpolate_pos_encoding: bool = False, input_size: torch.Size = None
) -> torch.Tensor:
position_embeddings = self.position_embeddings
if interpolate_pos_encoding:
height, width = input_size
position_embeddings = self.interpolate_pos_encoding(position_embeddings, height, width)
if batch_size is not None:
position_embeddings = position_embeddings.expand(batch_size, -1, -1)
return position_embeddings
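# Minimal usage sketch (hypothetical helper): one learned embedding per grid position, broadcast
# over the batch dimension at forward time.
def _trainable_position_encoding_sketch():
    pos_enc = PerceiverTrainablePositionEncoding(index_dims=(8, 8), num_channels=32)
    embeddings = pos_enc(batch_size=4)
    return embeddings.shape  # (4, 64, 32)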
def _check_or_build_spatial_positions(pos, index_dims, batch_size):
"""
Checks or builds spatial position features (x, y, ...).
Args:
pos (`torch.FloatTensor`):
None, or an array of position features. If None, position features are built. Otherwise, their size is checked.
index_dims (`list[int]`):
An iterable giving the spatial/index size of the data to be featurized.
batch_size (`int`):
The batch size of the data to be featurized.
Returns:
`torch.FloatTensor` of shape `(batch_size, prod(index_dims), len(index_dims))`: an array of position features.
"""
if pos is None:
pos = build_linear_positions(index_dims)
# equivalent to `torch.broadcast_to(pos[None], (batch_size,) + pos.shape)`
# but `torch.broadcast_to` cannot be converted to ONNX
pos = pos[None].expand((batch_size,) + pos.shape)
pos = torch.reshape(pos, [batch_size, np.prod(index_dims), -1])
else:
# Just a warning label: you probably don't want your spatial features to
# have a different spatial layout than your pos coordinate system.
# But feel free to override if you think it'll work!
if pos.shape[-1] != len(index_dims):
raise ValueError("Spatial features have the wrong number of dimensions.")
return pos
class PerceiverFourierPositionEncoding(PerceiverAbstractPositionEncoding):
"""Fourier (Sinusoidal) position encoding."""
def __init__(self, num_bands, max_resolution, concat_pos=True, sine_only=False):
super().__init__()
self.num_bands = num_bands
self.max_resolution = max_resolution
self.concat_pos = concat_pos
self.sine_only = sine_only
@property
def num_dimensions(self) -> int:
return len(self.max_resolution)
def output_size(self):
"""Returns size of positional encodings last dimension."""
num_dims = len(self.max_resolution)
encoding_size = self.num_bands * num_dims
if not self.sine_only:
encoding_size *= 2
if self.concat_pos:
encoding_size += self.num_dimensions
return encoding_size
def forward(
self,
index_dims: list[int],
batch_size: int,
device: torch.device,
dtype: torch.dtype,
pos: Optional[torch.FloatTensor] = None,
) -> torch.FloatTensor:
pos = _check_or_build_spatial_positions(pos, index_dims, batch_size)
fourier_pos_enc = generate_fourier_features(
pos,
num_bands=self.num_bands,
max_resolution=self.max_resolution,
concat_pos=self.concat_pos,
sine_only=self.sine_only,
).to(device=device, dtype=dtype)
return fourier_pos_enc
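# Minimal usage sketch (hypothetical helper): Fourier encodings are built on the fly for a given
# index grid; output_size() reports the channel count (64 bands * 2 dims * 2 + 2 = 258 here).
def _fourier_position_encoding_sketch():
    pos_enc = PerceiverFourierPositionEncoding(num_bands=64, max_resolution=(224, 224))
    embeddings = pos_enc(index_dims=[4, 4], batch_size=2, device=torch.device("cpu"), dtype=torch.float32)
    return embeddings.shape, pos_enc.output_size()  # (2, 16, 258), 258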
class AbstractPreprocessor(nn.Module):
@property
def num_channels(self) -> int:
"""Returns size of preprocessor output."""
raise NotImplementedError()
class PerceiverTextPreprocessor(AbstractPreprocessor):
"""
Text preprocessing for Perceiver Encoder. Can be used to embed `inputs` and add positional encodings.
The dimensionality of the embeddings is determined by the `d_model` attribute of the configuration.
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config: PerceiverConfig) -> None:
super().__init__()
self.config = config
self.embeddings = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.d_model)
@property
def num_channels(self) -> int:
return self.config.d_model
def forward(
self,
inputs: torch.LongTensor,
pos: Optional[torch.Tensor] = None,
network_input_is_1d: bool = True,
interpolate_pos_encoding: bool = False,
):
embeddings_without_pos = self.embeddings(inputs)
seq_length = inputs.shape[1]
position_ids = torch.arange(0, seq_length, device=inputs.device)
embeddings = embeddings_without_pos + self.position_embeddings(position_ids)
return embeddings, None, embeddings_without_pos
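# Minimal usage sketch (hypothetical helper, assuming small config values): token ids are embedded
# and summed with absolute position embeddings; the un-positioned embeddings are returned as well.
def _text_preprocessor_sketch():
    config = PerceiverConfig(vocab_size=262, d_model=768, max_position_embeddings=2048)
    preprocessor = PerceiverTextPreprocessor(config)
    input_ids = torch.randint(0, config.vocab_size, (2, 128))
    embeddings, _, embeddings_without_pos = preprocessor(input_ids)
    return embeddings.shape  # (2, 128, 768)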
class PerceiverEmbeddingDecoder(nn.Module):
"""
Module to decode embeddings (for masked language modeling).
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config: PerceiverConfig) -> None:
super().__init__()
self.config = config
self.vocab_size = config.vocab_size
self.bias = nn.Parameter(torch.zeros(self.vocab_size))
def forward(self, hidden_states: torch.Tensor, embedding_layer: torch.Tensor) -> torch.Tensor:
batch_size, seq_len, d_model = hidden_states.shape
# Flatten batch dim
output = torch.matmul(hidden_states.reshape([-1, d_model]), embedding_layer.weight.transpose(0, 1))
output = output + self.bias
return output.reshape([batch_size, seq_len, self.vocab_size])
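# Minimal usage sketch (hypothetical helper, assuming small config values): vocabulary logits are
# obtained by multiplying the decoded hidden states with the (tied) input embedding matrix, plus a
# learned bias.
def _embedding_decoder_sketch():
    config = PerceiverConfig(vocab_size=100, d_model=32)
    embedding_layer = nn.Embedding(num_embeddings=config.vocab_size, embedding_dim=config.d_model)
    decoder = PerceiverEmbeddingDecoder(config)
    hidden_states = torch.randn(2, 5, config.d_model)
    logits = decoder(hidden_states, embedding_layer=embedding_layer)
    return logits.shape  # (2, 5, 100)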
class PerceiverMultimodalPostprocessor(nn.Module):
"""
Multimodal postprocessing for Perceiver. Can be used to combine modality-specific postprocessors into a single
postprocessor.
Args:
modalities (`Mapping[str, PostprocessorType]`):
Dictionary mapping modality name to postprocessor class for that modality.
input_is_dict (`bool`, *optional*, defaults to `False`):
If True, input is assumed to be dictionary structured, and outputs keep the same dictionary shape. If
False, input is a tensor which is sliced up during postprocessing by *modality_sizes*.
"""
def __init__(self, modalities: Mapping[str, PostprocessorType], input_is_dict: bool = False):
super().__init__()
self.modalities = nn.ModuleDict(modalities)
self.input_is_dict = input_is_dict
def forward(
self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None
) -> Mapping[str, torch.Tensor]:
if not self.input_is_dict:
# Slice up modalities by their sizes.
if modality_sizes is None:
raise ValueError("Modality sizes should be specified if input is not a dictionary.")
inputs = restructure(modality_sizes=modality_sizes, inputs=inputs)
outputs = {
modality: postprocessor(inputs[modality], pos=pos, modality_sizes=None)
for modality, postprocessor in self.modalities.items()
}
return outputs
class PerceiverClassificationPostprocessor(nn.Module):
"""
Classification postprocessing for Perceiver. Can be used to convert the decoder output to classification logits.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
"""
def __init__(self, config: PerceiverConfig, in_channels: int) -> None:
super().__init__()
self.classifier = nn.Linear(in_channels, config.num_labels)
def forward(self, inputs, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return logits[:, 0, :]
class PerceiverAudioPostprocessor(nn.Module):
"""
Audio postprocessing for Perceiver. Can be used to convert the decoder output to audio features.
Args:
config ([*PerceiverConfig*]):
Model configuration.
in_channels (`int`):
Number of channels in the input.
postproc_type (`str`, *optional*, defaults to `"patches"`):
Postprocessor type to use. Currently, only "patches" is supported.
"""
def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str = "patches") -> None:
super().__init__()
if postproc_type not in ("patches",): # to be supported: 'conv', 'patches', 'pixels'
raise ValueError("Invalid postproc_type!")
# Architecture parameters:
self.classifier = nn.Linear(in_channels, config.samples_per_patch)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return torch.reshape(logits, [inputs.shape[0], -1])
class PerceiverProjectionPostprocessor(nn.Module):
"""
Projection postprocessing for Perceiver. Can be used to project the channels of the decoder output to a lower
dimension.
Args:
in_channels (`int`):
Number of channels in the input.
out_channels (`int`):
Number of channels in the output.
"""
def __init__(self, in_channels: int, out_channels: int) -> None:
super().__init__()
self.classifier = nn.Linear(in_channels, out_channels)
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, modality_sizes=None) -> torch.Tensor:
logits = self.classifier(inputs)
return logits
class PerceiverImagePreprocessor(AbstractPreprocessor):
"""
Image preprocessing for Perceiver Encoder.
Note: the *out_channels* argument refers to the output channels of a convolutional layer, if *prep_type* is set to
"conv1x1" or "conv". If one adds absolute position embeddings, one must make sure the *num_channels* of the
position encoding kwargs are set equal to the *out_channels*.
Args:
config ([*PerceiverConfig*]):
Model configuration.
prep_type (`str`, *optional*, defaults to `"conv"`):
Preprocessing type. Can be "conv1x1", "conv", "patches", "pixels".
spatial_downsample (`int`, *optional*, defaults to 4):
Spatial downsampling factor.
temporal_downsample (`int`, *optional*, defaults to 1):
Temporal downsampling factor (only relevant in case a time dimension is present).
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Position encoding type. Can be "fourier" or "trainable".
in_channels (`int`, *optional*, defaults to 3):
Number of channels in the input.
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
conv_after_patching (`bool`, *optional*, defaults to `False`):
Whether to apply a convolutional layer after patching.
conv_after_patching_in_channels (`int`, *optional*, defaults to 54):
Number of channels in the input of the convolutional layer after patching.
conv2d_use_batchnorm (`bool`, *optional*, defaults to `True`):
Whether to use batch normalization in the convolutional layer.
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
"""
def __init__(
self,
config,
prep_type="conv",
spatial_downsample: int = 4,
temporal_downsample: int = 1,
position_encoding_type: str = "fourier",
in_channels: int = 3,
out_channels: int = 64,
conv_after_patching: bool = False,
conv_after_patching_in_channels: int = 54, # only relevant when conv_after_patching = True
conv2d_use_batchnorm: bool = True,
concat_or_add_pos: str = "concat",
project_pos_dim: int = -1,
**position_encoding_kwargs,
):
super().__init__()
self.config = config
if prep_type not in ("conv", "patches", "pixels", "conv1x1"):
raise ValueError(f"Prep_type {prep_type} is invalid")
if concat_or_add_pos not in ["concat", "add"]:
raise ValueError(f"Invalid value {concat_or_add_pos} for concat_or_add_pos.")
self.in_channels = in_channels
self.prep_type = prep_type
self.spatial_downsample = spatial_downsample
self.temporal_downsample = temporal_downsample
self.position_encoding_type = position_encoding_type
self.concat_or_add_pos = concat_or_add_pos
self.conv_after_patching = conv_after_patching
self.out_channels = out_channels
if self.prep_type == "conv":
# Downsampling with conv is currently restricted
convnet_num_layers = math.log(spatial_downsample, 4)
convnet_num_layers_is_int = convnet_num_layers == np.round(convnet_num_layers)
if not convnet_num_layers_is_int or temporal_downsample != 1:
raise ValueError(
"Only powers of 4 expected for spatial and 1 expected for temporal downsampling with conv."
)
self.convnet = Conv2DDownsample(
in_channels=in_channels,
num_layers=int(convnet_num_layers),
out_channels=out_channels,
use_batchnorm=conv2d_use_batchnorm,
)
elif self.prep_type == "conv1x1":
if temporal_downsample != 1:
raise ValueError("Conv1x1 does not downsample in time.")
self.convnet_1x1 = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(1, 1),
# spatial_downsample is unconstrained for 1x1 convolutions.
stride=(spatial_downsample, spatial_downsample),
)
# Position embeddings
self.project_pos_dim = project_pos_dim
self.position_embeddings, self.positions_projection = build_position_encoding(
position_encoding_type=position_encoding_type,
out_channels=out_channels,
project_pos_dim=project_pos_dim,
**position_encoding_kwargs,
)
# Optional convolutional layer after patches.
self.conv_after_patches = (
nn.Linear(conv_after_patching_in_channels, self.out_channels) if conv_after_patching else nn.Identity()
)
@property
def num_channels(self) -> int:
# The input has 2 index dimensions for images and 3 for videos (which carry an extra temporal
# dimension); is_temporal indicates whether a temporal dimension is present.
is_temporal = self.position_embeddings.num_dimensions > 2
# position embedding
if self.project_pos_dim > 0:
pos_dim = self.project_pos_dim
else:
pos_dim = self.position_embeddings.output_size()
if self.concat_or_add_pos == "add":
return pos_dim
# inputs
if self.conv_after_patching or self.prep_type in ("conv1x1", "conv"):
inp_dim = self.out_channels
elif self.prep_type == "pixels":
inp_dim = self.in_channels
if not is_temporal:
inp_dim = math.ceil(inp_dim / self.spatial_downsample)
elif self.prep_type == "patches":
if self.conv_after_patching:
inp_dim = self.out_channels
else:
inp_dim = self.in_channels * self.spatial_downsample**2
if is_temporal:
inp_dim *= self.temporal_downsample
return inp_dim + pos_dim
def _build_network_inputs(
self, inputs: torch.Tensor, network_input_is_1d: bool = True, interpolate_pos_encoding: bool = False
):
"""
Construct the final input, including position encoding.
This method expects the inputs to always have channels as the last dimension.
"""
batch_size = inputs.shape[0]
input_size = inputs.shape[1:3]
index_dims = inputs.shape[1:-1]
indices = np.prod(index_dims)
# Flatten input features to a 1D index dimension if necessary.
if len(inputs.shape) > 3 and network_input_is_1d:
inputs = torch.reshape(inputs, [batch_size, indices, -1])
# Construct the position encoding.
if self.position_encoding_type == "trainable":
pos_enc = self.position_embeddings(batch_size, interpolate_pos_encoding, input_size)
elif self.position_encoding_type == "fourier":
pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
# Optionally project them to a target dimension.
pos_enc = self.positions_projection(pos_enc)
if not network_input_is_1d:
# Reshape pos to match the input feature shape
# if the network takes non-1D inputs
sh = inputs.shape
pos_enc = torch.reshape(pos_enc, list(sh)[:-1] + [-1])
if self.concat_or_add_pos == "concat":
inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
elif self.concat_or_add_pos == "add":
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def forward(
self,
inputs: torch.Tensor,
pos: Optional[torch.Tensor] = None,
network_input_is_1d: bool = True,
interpolate_pos_encoding: bool = False,
):
if self.prep_type == "conv":
# Convnet image featurization.
# Downsamples spatially by a factor of 4
inputs = self.convnet(inputs)
elif self.prep_type == "conv1x1":
# map inputs to self.out_channels
inputs = self.convnet_1x1(inputs)
elif self.prep_type == "pixels":
# if requested, downsamples in the crudest way
if inputs.ndim == 4:
inputs = inputs[:: self.spatial_downsample, :: self.spatial_downsample]
elif inputs.ndim == 5:
inputs = inputs[
:, :: self.temporal_downsample, :, :: self.spatial_downsample, :: self.spatial_downsample
]
else:
raise ValueError("Unsupported data format for pixels.")
elif self.prep_type == "patches":
# Space2depth featurization.
# Video: B x T x C x H x W
inputs = space_to_depth(
inputs, temporal_block_size=self.temporal_downsample, spatial_block_size=self.spatial_downsample
)
if inputs.ndim == 5 and inputs.shape[1] == 1:
# for flow
inputs = inputs.squeeze(dim=1)
# Optionally apply conv layer.
inputs = self.conv_after_patches(inputs)
if self.prep_type != "patches":
# move channels to last dimension, as the _build_network_inputs method below expects this
if inputs.ndim == 4:
inputs = inputs.permute(0, 2, 3, 1)
elif inputs.ndim == 5:
inputs = inputs.permute(0, 1, 3, 4, 2)
else:
raise ValueError("Unsupported data format for conv1x1.")
inputs, inputs_without_pos = self._build_network_inputs(inputs, network_input_is_1d, interpolate_pos_encoding)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
class PerceiverOneHotPreprocessor(AbstractPreprocessor):
"""
One-hot preprocessor for Perceiver Encoder. Can be used to add a dummy index dimension to the input.
Args:
config ([`PerceiverConfig`]):
Model configuration.
"""
def __init__(self, config: PerceiverConfig) -> None:
super().__init__()
self.config: PerceiverConfig = config
@property
def num_channels(self) -> int:
return self.config.num_labels
def forward(self, inputs: torch.Tensor, pos: Optional[torch.Tensor] = None, network_input_is_1d: bool = True):
# Add a dummy index dimension.
inputs = inputs[:, None, :]
# No position encodings, so the 1st (input) and 3rd (inputs_without_pos)
# outputs are identical.
return inputs, None, inputs
class PerceiverAudioPreprocessor(AbstractPreprocessor):
"""
Audio preprocessing for Perceiver Encoder.
Args:
config ([`PerceiverConfig`]):
Model configuration.
prep_type (`str`, *optional*, defaults to `"patches"`):
Preprocessor type to use. Only "patches" is supported.
samples_per_patch (`int`, *optional*, defaults to 96):
Number of samples per patch.
position_encoding_type (`str`, *optional*, defaults to `"fourier"`):
Type of position encoding to use. Can be "trainable" or "fourier".
concat_or_add_pos (`str`, *optional*, defaults to `"concat"`):
How to concatenate the position encoding to the input. Can be "concat" or "add".
out_channels (`int`, *optional*, defaults to 64):
Number of channels in the output.
project_pos_dim (`int`, *optional*, defaults to -1):
Dimension of the position encoding to project to. If -1, no projection is applied.
**position_encoding_kwargs (`Dict`, *optional*):
Keyword arguments for the position encoding.
"""
def __init__(
self,
config,
prep_type: str = "patches",
samples_per_patch: int = 96,
position_encoding_type: str = "fourier",
concat_or_add_pos: str = "concat",
out_channels=64,
project_pos_dim=-1,
**position_encoding_kwargs,
):
super().__init__()
self.config = config
if prep_type not in ("patches",):
raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")
if concat_or_add_pos not in ["concat", "add"]:
raise ValueError(f"Concat_or_pos {concat_or_add_pos} is invalid, can only be 'concat' or 'add'.")
self.samples_per_patch = samples_per_patch
self.position_encoding_type = position_encoding_type
self.concat_or_add_pos = concat_or_add_pos
self.project_pos_dim = project_pos_dim
# Position embeddings
self.position_embeddings, self.positions_projection = build_position_encoding(
position_encoding_type=position_encoding_type,
out_channels=out_channels,
project_pos_dim=project_pos_dim,
**position_encoding_kwargs,
)
@property
def num_channels(self) -> int:
# position embedding
if self.project_pos_dim > 0:
pos_dim = self.project_pos_dim
else:
pos_dim = self.position_embeddings.output_size()
if self.concat_or_add_pos == "add":
return pos_dim
return self.samples_per_patch + pos_dim
def _build_network_inputs(self, inputs):
"""Construct the final input, including position encoding."""
batch_size = inputs.shape[0]
index_dims = inputs.shape[1:-1]
# Construct the position encoding.
if self.position_encoding_type == "trainable":
pos_enc = self.position_embeddings(batch_size)
elif self.position_encoding_type == "fourier":
pos_enc = self.position_embeddings(index_dims, batch_size, device=inputs.device, dtype=inputs.dtype)
# Optionally project them to a target dimension.
pos_enc = self.positions_projection(pos_enc)
if self.concat_or_add_pos == "concat":
inputs_with_pos = torch.cat([inputs, pos_enc], dim=-1)
elif self.concat_or_add_pos == "add":
inputs_with_pos = inputs + pos_enc
return inputs_with_pos, inputs
def forward(
self,
inputs: torch.Tensor,
pos: Optional[torch.Tensor] = None,
network_input_is_1d: bool = True,
interpolate_pos_encoding: bool = False,
):
inputs = torch.reshape(inputs, [inputs.shape[0], -1, self.samples_per_patch])
inputs, inputs_without_pos = self._build_network_inputs(inputs)
modality_sizes = None # Size for each modality, only needed for multimodal
return inputs, modality_sizes, inputs_without_pos
class PerceiverMultimodalPreprocessor(AbstractPreprocessor):
"""
Multimodal preprocessing for Perceiver Encoder.
Inputs for each modality are preprocessed, then padded with trainable position embeddings to have the same number
of channels.
Args:
modalities (`Mapping[str, PreprocessorType]`):
Dict mapping modality name to preprocessor.
mask_probs (`dict[str, float]`):
Dict mapping modality name to masking probability of that modality.
min_padding_size (`int`, *optional*, defaults to 2):
The minimum padding size for all modalities. The final output will have num_channels equal to the maximum
channels across all modalities plus min_padding_size.
"""
def __init__(
self,
modalities: Mapping[str, PreprocessorType],
mask_probs: Optional[Mapping[str, float]] = None,
min_padding_size: int = 2,
):
super().__init__()
self.modalities = nn.ModuleDict(modalities)
self.min_padding_size = min_padding_size
self.mask_probs = mask_probs if mask_probs is not None else {}
self.padding = nn.ParameterDict(
{
modality: nn.Parameter(torch.randn(1, self.num_channels - preprocessor.num_channels))
for modality, preprocessor in modalities.items()
}
)
self.mask = nn.ParameterDict(
{modality: nn.Parameter(torch.randn(1, self.num_channels)) for modality, _ in self.mask_probs.items()}
)
@property
def num_channels(self) -> int:
max_channel_size = max(processor.num_channels for _, processor in self.modalities.items())
common_channel_size = max_channel_size + self.min_padding_size
return common_channel_size
def forward(
self,
inputs: Mapping[str, torch.Tensor],
pos: Optional[torch.Tensor] = None,
network_input_is_1d: bool = True,
interpolate_pos_encoding: bool = False,
) -> PreprocessorOutputType:
padded = {}
modality_sizes = {}
inputs_without_pos = {}
for modality, preprocessor in self.modalities.items():
# preprocess each modality using the respective preprocessor.
output, _, inputs_without_pos[modality] = preprocessor(
inputs[modality], pos=pos, network_input_is_1d=network_input_is_1d
)
# pad to the same common_channel_size.
batch_size, num_samples, num_channels = output.shape
pos_enc = self.padding[modality].expand(batch_size, -1, -1)
padding = torch.broadcast_to(
pos_enc,
[batch_size, num_samples, self.num_channels - num_channels],
)
output_padded = torch.cat([output, padding], dim=2)
# mask if required
if modality in self.mask_probs:
mask_token = self.mask[modality].expand(batch_size, -1, -1)
mask_prob = self.mask_probs[modality]
mask = torch.bernoulli(torch.full([batch_size, num_samples], mask_prob))
mask = torch.unsqueeze(mask, dim=2).to(mask_token.device)
output_padded = (1 - mask) * output_padded + mask * mask_token
padded[modality] = output_padded
modality_sizes[modality] = output_padded.shape[1]
# Apply a predictable ordering to the modalities
padded_ls = [padded[k] for k in sorted(padded.keys())]
# Finally, concatenate along the time dimension
final_inputs = torch.cat(padded_ls, dim=1)
return final_inputs, modality_sizes, inputs_without_pos
__all__ = [
"PerceiverForImageClassificationConvProcessing",
"PerceiverForImageClassificationFourier",
"PerceiverForImageClassificationLearned",
"PerceiverForMaskedLM",
"PerceiverForMultimodalAutoencoding",
"PerceiverForOpticalFlow",
"PerceiverForSequenceClassification",
"PerceiverLayer",
"PerceiverModel",
"PerceiverPreTrainedModel",
]
| transformers/src/transformers/models/perceiver/modeling_perceiver.py/0 | {
"file_path": "transformers/src/transformers/models/perceiver/modeling_perceiver.py",
"repo_id": "transformers",
"token_count": 60857
} | 498 |
# coding=utf-8
# Copyright 2023 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Weights conversion script for Phi
This script downloads the Phi-1, Phi-1.5 and Phi-2 checkpoints to "checkpoint_path" and then converts the weights to the
HuggingFace model format and saves them in "pytorch_dump_folder_path".
Example: $python ./convert_phi_weights_to_hf.py --model_name "microsoft/phi-2" --pytorch_dump_folder_path ./dump_folder/ --checkpoint_path ./ckpt_path/
"""
import argparse
import gc
import os
import safetensors
import torch
from huggingface_hub import hf_hub_download
from transformers import PhiConfig, PhiForCausalLM
_MODELS = {
"microsoft/phi-1": ["https://huggingface.co/microsoft/phi-1/blob/main/pytorch_model.bin"],
"microsoft/phi-1_5": ["https://huggingface.co/microsoft/phi-1_5/blob/main/pytorch_model.bin"],
"microsoft/phi-2": [
"https://huggingface.co/microsoft/phi-2/blob/main/model-00001-of-00002.safetensors",
"https://huggingface.co/microsoft/phi-2/blob/main/model-00002-of-00002.safetensors",
],
}
PHI_MAPPING = {
"transformer.embd.wte.weight": "model.embed_tokens.weight",
"lm_head.linear": "lm_head",
"lm_head.ln": "model.final_layernorm",
"layers": "model.layers",
"transformer": "model",
".h.": ".layers.",
"ln": "input_layernorm",
"mixer": "self_attn",
"Wqkv": "query_key_value",
"out_proj": "dense",
}
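# For instance, a hypothetical original key of the form "transformer.h.0.mixer.Wqkv.weight" is
# rewritten by the substitutions above (applied in dict order inside `convert_weights`) to
# "model.layers.0.self_attn.query_key_value.weight".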
def convert_weights(original_weights, mapping, config):
converted_weights = {}
original_weights_keys = sorted(original_weights.keys())
for original_weights_key in original_weights_keys:
new_key = original_weights_key
if "rotary_emb" in new_key:
continue
if "Wqkv" in new_key:
if "weight" in new_key:
weight = original_weights[new_key]
weights_shape = weight.shape
weight = (
weight.view(3, config.num_attention_heads, -1, config.hidden_size)
.transpose(0, 1)
.reshape(*weights_shape)
)
original_weights[new_key] = weight
elif "bias" in new_key:
bias = original_weights[new_key]
bias_shape = bias.shape
bias = bias.view(3, config.num_attention_heads, -1).transpose(0, 1).reshape(*bias_shape)
original_weights[new_key] = bias
for k, v in mapping.items():
if k in new_key:
new_key = new_key.replace(k, v)
converted_weights[new_key] = original_weights.pop(original_weights_key)
return converted_weights
def _download(url: str, root: str):
repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}"
filename = f"{url.split('/')[-1]}"
hf_hub_download(
repo_id=repo_id,
filename=filename,
force_filename=root,
local_dir_use_symlinks=False,
)
def convert_phi_weights(
model_name, checkpoint_path, pytorch_dump_folder_path, use_cuda, save_weights_directly, _MODELS
):
_MODELS = _MODELS if model_name not in _MODELS else {model_name: _MODELS.get(model_name)}
device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
for model_name, model_url in _MODELS.items():
converted_checkpoint = {}
model_checkpoint = {}
# for phi-2 the weights are stored in 2 different safetensors files, so we need to iterate over that list and download one at a time
for model_each_url in model_url:
model_path = os.path.join(checkpoint_path, model_name + "_" + model_each_url.split("/")[-1])
if not os.path.exists(model_path):
print(f"\n{model_name} was not found! Downloading it to {model_path}")
_download(url=model_each_url, root=model_path)
if model_path.endswith("safetensors"):
loaded_weights = safetensors.torch.load_file(model_path, device=device)
else:
loaded_weights = torch.load(model_path, map_location=device, weights_only=True)
model_checkpoint.update(**loaded_weights)
model_type = model_name.split("/")[1] # phi-1 or phi-1_5 or phi-2
# init the config for phi-1 and phi-1.5
config = PhiConfig()
# if we are dealing with phi-2 then update the config
if model_type == "phi-2":
config.hidden_size = 2560
config.intermediate_size = 10240
config.num_hidden_layers = 32
config.resid_pdrop = 0.1
config.partial_rotary_factor = 0.4
config.num_hidden_layers = 32
config.dtype = "float16"
# Converting the weights
converted_checkpoint.update(**convert_weights(model_checkpoint, PHI_MAPPING, config))
# Save either the whole model or the converted weights
if save_weights_directly:
save_weights_path = os.path.join(pytorch_dump_folder_path, model_type + "_pytorch_model.bin")
torch.save(converted_checkpoint, save_weights_path)
print(f"Model weights saved at {save_weights_path}!")
else:
model = PhiForCausalLM(config).to(device)
model.load_state_dict(converted_checkpoint, strict=True)
save_model_path = os.path.join(pytorch_dump_folder_path, model_type)
model.save_pretrained(save_model_path)
print(f"Model saved at {save_model_path}!")
# release GPU memory for the 2nd model if cuda was used.
del config, model
# release GPU memory for the 2nd model if cuda was used.
del model_checkpoint, converted_checkpoint
if use_cuda:
torch.cuda.empty_cache()
gc.collect()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
type=str,
help="Name of the model to convert. (Please enter one of the following: phi-1, phi-1_5, phi-2). If nothing is provided, all models will be converted.",
default=None,
)
parser.add_argument(
"--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)"
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the output PyTorch model. (Please enter full path)",
)
parser.add_argument(
"--use_cuda",
default=False,
type=bool,
help="Whether to load the weights on GPU during conversion or not, False by default",
)
parser.add_argument(
"--save_weights_directly",
default=True,
type=bool,
help="Whether to save the weights directly after conversion or load the weight to the Phi model and then save "
"the Phi model along with weights. True by default",
)
args = parser.parse_args()
convert_phi_weights(
args.model_name,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.use_cuda,
args.save_weights_directly,
_MODELS,
)
| transformers/src/transformers/models/phi/convert_phi_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/phi/convert_phi_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 3301
} | 499 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for Pixtral.
"""
from typing import Union
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, is_valid_image, load_image
from ...processing_utils import (
MultiModalData,
ProcessingKwargs,
ProcessorMixin,
Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import is_vision_available, logging
if is_vision_available():
from .image_processing_pixtral import get_resize_output_image_size
logger = logging.get_logger(__name__)
class PixtralProcessorKwargs(ProcessingKwargs, total=False):
_defaults = {
"text_kwargs": {
"padding": False,
"return_mm_token_type_ids": False,
},
"images_kwargs": {},
"common_kwargs": {
"return_tensors": "pt",
},
}
# Copied from transformers.models.idefics2.processing_idefics2.is_url
def is_url(val) -> bool:
return isinstance(val, str) and val.startswith("http")
# Copied from transformers.models.idefics2.processing_idefics2.is_image_or_image_url
def is_image_or_image_url(elem):
return is_url(elem) or is_valid_image(elem)
class PixtralProcessor(ProcessorMixin):
r"""
Constructs a Pixtral processor which wraps a Pixtral image processor and a Pixtral tokenizer into a single processor.
[`PixtralProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`LlamaTokenizerFast`]. See the
[`~PixtralProcessor.__call__`] and [`~PixtralProcessor.decode`] for more information.
Args:
image_processor ([`PixtralImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`LlamaTokenizerFast`], *optional*):
The tokenizer is a required input.
patch_size (`int`, *optional*, defaults to 16):
Patch size from the vision tower.
spatial_merge_size (`int`, *optional*, defaults to 1):
The downsampling factor for the spatial merge operation.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
image_token (`str`, *optional*, defaults to `"[IMG]"`):
Special token used to denote image location.
image_break_token (`str`, *optional*, defaults to `"[IMG_BREAK]"`):
Special token used to denote the end of a line of pixels in an image.
image_end_token (`str`, *optional*, defaults to `"[IMG_END]"`):
Special token used to denote the end of an image input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "AutoImageProcessor"
tokenizer_class = "AutoTokenizer"
def __init__(
self,
image_processor=None,
tokenizer=None,
patch_size: int = 16,
spatial_merge_size: int = 1,
chat_template=None,
image_token="[IMG]", # set the default and let users change if they have peculiar special tokens in rare cases
image_break_token="[IMG_BREAK]",
image_end_token="[IMG_END]",
**kwargs,
):
self.patch_size = patch_size
self.spatial_merge_size = spatial_merge_size
self.image_token = image_token
self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
self.image_break_token = image_break_token
self.image_end_token = image_end_token
self.image_break_token_id = tokenizer.convert_tokens_to_ids(self.image_break_token)
self.image_end_token_id = tokenizer.convert_tokens_to_ids(self.image_end_token)
self.image_ids = [self.image_token_id, self.image_break_token_id, self.image_end_token_id]
super().__init__(image_processor, tokenizer, chat_template=chat_template)
def __call__(
self,
images: ImageInput = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
audio=None,
videos=None,
**kwargs: Unpack[PixtralProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
output_kwargs = self._merge_kwargs(
PixtralProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
patch_size = self.patch_size * self.spatial_merge_size
if images is not None:
if is_image_or_image_url(images):
images = [images]
elif isinstance(images, (list, tuple)) and is_image_or_image_url(images[0]):
pass
elif (
isinstance(images, (list, tuple))
and isinstance(images[0], (list, tuple))
and is_image_or_image_url(images[0][0])
):
images = [image for sublist in images for image in sublist]
else:
raise ValueError(
"Invalid input images. Please provide a single image, a list of images, or a list of lists of images."
)
images = [load_image(im) if isinstance(im, str) else im for im in images]
image_inputs = self.image_processor(images, patch_size=patch_size, **output_kwargs["images_kwargs"])
else:
image_inputs = {}
if isinstance(text, str):
text = [text]
elif not isinstance(text, list) and not isinstance(text[0], str):
raise TypeError("Invalid input text. Please provide a string, or a list of strings")
# try to expand inputs in processing if we have the necessary parts
prompt_strings = text
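# Worked example of the expansion below (illustrative numbers): a 512x512 image with patch_size=16
# and spatial_merge_size=1 yields 32x32 patches, so each "[IMG]" placeholder in the prompt is
# replaced by 32 rows of 32 "[IMG]" tokens, each row terminated by "[IMG_BREAK]", with the final
# token of the last row replaced by "[IMG_END]".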
if image_inputs.get("pixel_values") is not None:
# Replace the image token with the expanded image token sequence
image_sizes = iter(image_inputs["image_sizes"])
prompt_strings = []
replace_strings = []
for sample in text:
while self.image_token in sample:
height, width = next(image_sizes)
num_height_tokens = height // patch_size
num_width_tokens = width // patch_size
replace_tokens = [
[self.image_token] * num_width_tokens + [self.image_break_token]
] * num_height_tokens
# Flatten list
replace_tokens = [item for sublist in replace_tokens for item in sublist]
replace_tokens[-1] = self.image_end_token
replace_str = "".join(replace_tokens)
replace_strings.append(replace_str)
sample = sample.replace(self.image_token, "<placeholder>", 1)
while "<placeholder>" in sample:
replace_str = replace_strings.pop(0)
sample = sample.replace("<placeholder>", replace_str, 1)
prompt_strings.append(sample)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(prompt_strings, **output_kwargs["text_kwargs"], return_tensors=None)
self._check_special_mm_tokens(prompt_strings, text_inputs, modalities=["image"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[np.isin(array_ids, self.image_ids)] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = PixtralProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
size = images_kwargs.get("size", None) or self.image_processor.size
patch_size = self.patch_size * self.spatial_merge_size
num_image_tokens = []
for height, width in image_sizes:
resized_height, resized_width = get_resize_output_image_size(
np.zeros((height, width, 3)),
size=(size["longest_edge"], size["longest_edge"]),
patch_size=(patch_size, patch_size),
)
num_height_tokens = resized_height // patch_size
num_width_tokens = resized_width // patch_size
num_image_tokens.append((num_width_tokens + 1) * num_height_tokens)
num_image_patches = [1] * len(image_sizes)
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
return MultiModalData(**vision_data)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return tokenizer_input_names + image_processor_input_names + ["image_sizes"]
__all__ = ["PixtralProcessor"]
| transformers/src/transformers/models/pixtral/processing_pixtral.py/0 | {
"file_path": "transformers/src/transformers/models/pixtral/processing_pixtral.py",
"repo_id": "transformers",
"token_count": 5415
} | 500 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""File for loading the Pop2Piano model weights from the official repository and to show how tokenizer vocab was
constructed"""
import json
import torch
from transformers import Pop2PianoConfig, Pop2PianoForConditionalGeneration
########################## MODEL WEIGHTS ##########################
# These weights were downloaded from the official pop2piano repository
# https://huggingface.co/sweetcocoa/pop2piano/blob/main/model-1999-val_0.67311615.ckpt
official_weights = torch.load("./model-1999-val_0.67311615.ckpt", weights_only=True)
state_dict = {}
# load the config and init the model
cfg = Pop2PianoConfig.from_pretrained("sweetcocoa/pop2piano")
model = Pop2PianoForConditionalGeneration(cfg)
# load relative attention bias
state_dict["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = official_weights["state_dict"][
"transformer.encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
]
state_dict["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = official_weights["state_dict"][
"transformer.decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"
]
# load embed tokens and final layer norm for both encoder and decoder
state_dict["encoder.embed_tokens.weight"] = official_weights["state_dict"]["transformer.encoder.embed_tokens.weight"]
state_dict["decoder.embed_tokens.weight"] = official_weights["state_dict"]["transformer.decoder.embed_tokens.weight"]
state_dict["encoder.final_layer_norm.weight"] = official_weights["state_dict"][
"transformer.encoder.final_layer_norm.weight"
]
state_dict["decoder.final_layer_norm.weight"] = official_weights["state_dict"][
"transformer.decoder.final_layer_norm.weight"
]
# load lm_head, mel_conditioner.emb and shared
state_dict["lm_head.weight"] = official_weights["state_dict"]["transformer.lm_head.weight"]
state_dict["mel_conditioner.embedding.weight"] = official_weights["state_dict"]["mel_conditioner.embedding.weight"]
state_dict["shared.weight"] = official_weights["state_dict"]["transformer.shared.weight"]
# load each encoder blocks
for i in range(cfg.num_layers):
# layer 0
state_dict[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.0.SelfAttention.q.weight"
]
state_dict[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.0.SelfAttention.k.weight"
]
state_dict[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.0.SelfAttention.v.weight"
]
state_dict[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.0.SelfAttention.o.weight"
]
state_dict[f"encoder.block.{i}.layer.0.layer_norm.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.0.layer_norm.weight"
]
# layer 1
state_dict[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"
]
state_dict[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"
]
state_dict[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.1.DenseReluDense.wo.weight"
]
state_dict[f"encoder.block.{i}.layer.1.layer_norm.weight"] = official_weights["state_dict"][
f"transformer.encoder.block.{i}.layer.1.layer_norm.weight"
]
# load each decoder blocks
for i in range(6):
# layer 0
state_dict[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.0.SelfAttention.q.weight"
]
state_dict[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.0.SelfAttention.k.weight"
]
state_dict[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.0.SelfAttention.v.weight"
]
state_dict[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.0.SelfAttention.o.weight"
]
state_dict[f"decoder.block.{i}.layer.0.layer_norm.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.0.layer_norm.weight"
]
# layer 1
state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.1.EncDecAttention.q.weight"
]
state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.1.EncDecAttention.k.weight"
]
state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.1.EncDecAttention.v.weight"
]
state_dict[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.1.EncDecAttention.o.weight"
]
state_dict[f"decoder.block.{i}.layer.1.layer_norm.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.1.layer_norm.weight"
]
# layer 2
state_dict[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"
]
state_dict[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"
]
state_dict[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.2.DenseReluDense.wo.weight"
]
state_dict[f"decoder.block.{i}.layer.2.layer_norm.weight"] = official_weights["state_dict"][
f"transformer.decoder.block.{i}.layer.2.layer_norm.weight"
]
model.load_state_dict(state_dict, strict=True)
# save the weights
torch.save(state_dict, "./pytorch_model.bin")
########################## TOKENIZER ##########################
# the tokenize and detokenize methods are taken from the official implementation
# link : https://github.com/sweetcocoa/pop2piano/blob/fac11e8dcfc73487513f4588e8d0c22a22f2fdc5/midi_tokenizer.py#L34
def tokenize(idx, token_type, n_special=4, n_note=128, n_velocity=2):
if token_type == "TOKEN_TIME":
return n_special + n_note + n_velocity + idx
elif token_type == "TOKEN_VELOCITY":
return n_special + n_note + idx
elif token_type == "TOKEN_NOTE":
return n_special + idx
elif token_type == "TOKEN_SPECIAL":
return idx
else:
return -1
# link : https://github.com/sweetcocoa/pop2piano/blob/fac11e8dcfc73487513f4588e8d0c22a22f2fdc5/midi_tokenizer.py#L48
def detokenize(idx, n_special=4, n_note=128, n_velocity=2, time_idx_offset=0):
if idx >= n_special + n_note + n_velocity:
return "TOKEN_TIME", (idx - (n_special + n_note + n_velocity)) + time_idx_offset
elif idx >= n_special + n_note:
return "TOKEN_VELOCITY", idx - (n_special + n_note)
elif idx >= n_special:
return "TOKEN_NOTE", idx - n_special
else:
return "TOKEN_SPECIAL", idx
# create the decoder and then the encoder of the tokenizer
decoder = {}
for i in range(cfg.vocab_size):
decoder.update({i: f"{detokenize(i)[1]}_{detokenize(i)[0]}"})
encoder = {v: k for k, v in decoder.items()}
# save the vocab
with open("./vocab.json", "w") as file:
file.write(json.dumps(encoder))
| transformers/src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/pop2piano/convert_pop2piano_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 3453
} | 501 |
# coding=utf-8
# Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Qwen2MoE model."""
import math
from typing import Optional, Union
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, StaticCache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import flash_attn_supports_top_left_mask, is_flash_attn_available
from ...modeling_layers import (
GenericForQuestionAnswering,
GenericForSequenceClassification,
GenericForTokenClassification,
GradientCheckpointingLayer,
)
from ...modeling_outputs import (
MoeCausalLMOutputWithPast,
MoeModelOutputWithPast,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_qwen2_moe import Qwen2MoeConfig
if is_flash_attn_available():
from ...modeling_flash_attention_utils import _flash_attention_forward
if is_torch_flex_attn_available():
from torch.nn.attention.flex_attention import BlockMask
from ...integrations.flex_attention import make_flex_block_causal_mask
logger = logging.get_logger(__name__)
def load_balancing_loss_func(
gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
num_experts: Optional[int] = None,
top_k=2,
attention_mask: Optional[torch.Tensor] = None,
) -> Union[torch.Tensor, int]:
r"""
Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
experts is too unbalanced.
Args:
gate_logits:
Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
shape [batch_size X sequence_length, num_experts].
num_experts:
Number of experts
top_k:
The number of experts to route per-token, can be also interpreted as the `top-k` routing
parameter.
attention_mask (`torch.Tensor`, *optional*):
The attention_mask used in forward function
shape [batch_size X sequence_length] if not None.
Returns:
The auxiliary loss.
"""
if gate_logits is None or not isinstance(gate_logits, tuple):
return 0
if isinstance(gate_logits, tuple):
compute_device = gate_logits[0].device
concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
_, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
if attention_mask is None:
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.mean(routing_weights, dim=0)
else:
batch_size, sequence_length = attention_mask.shape
num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
# Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
expert_attention_mask = (
attention_mask[None, :, :, None, None]
.expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
.reshape(-1, top_k, num_experts)
.to(compute_device)
)
# Compute the percentage of tokens routed to each experts
tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
expert_attention_mask, dim=0
)
# Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
router_per_expert_attention_mask = (
attention_mask[None, :, :, None]
.expand((num_hidden_layers, batch_size, sequence_length, routing_weights.shape[1]))
.reshape(-1, routing_weights.shape[1])
.to(compute_device)
)
# Compute the average probability of routing to these experts
router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
router_per_expert_attention_mask, dim=0
)
device_index = routing_weights.device.index if routing_weights.device.index is not None else 0
rank = routing_weights.shape[1] * int(device_index)
overall_loss = torch.sum(
tokens_per_expert[:, rank : rank + routing_weights.shape[1]] * router_prob_per_expert.unsqueeze(0)
)
return overall_loss * num_experts
# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Qwen2Moe
class Qwen2MoeRMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Qwen2MoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding with Llama->Qwen2Moe
class Qwen2MoeRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Qwen2MoeConfig, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
# Copied from transformers.models.llama.modeling_llama.apply_rotary_pos_emb
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
# Modified from transformers.models.mistral.modeling_mistral.MistralMLP with Mistral->Qwen2Moe
class Qwen2MoeMLP(nn.Module):
def __init__(self, config, intermediate_size=None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_act]
def forward(self, x):
return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
# Copied from transformers.models.llama.modeling_llama.repeat_kv
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
# copied from transformers.models.qwen2.modeling_qwen2.Qwen2Attention with Qwen2->Qwen2Moe
# no longer copied after attention refactors
class Qwen2MoeAttention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
and "Generating Long Sequences with Sparse Transformers".
"""
def __init__(self, config: Qwen2MoeConfig, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing `layer_idx` is not recommended and will "
"to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.rope_theta = config.rope_theta
self.is_causal = True
self.attention_dropout = config.attention_dropout
if (self.head_dim * self.num_heads) != self.hidden_size:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=self.config.qkv_bias)
self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.config.qkv_bias)
self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=self.config.qkv_bias)
self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
self.rotary_emb = Qwen2MoeRotaryEmbedding(config=self.config)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
# repeat k/v heads if n_kv_heads < n_heads
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
# NO LONGER EXIST Copied from transformers.models.qwen2.modeling_qwen2.Qwen2FlashAttention2 with Qwen2->Qwen2Moe
# TODO cyril: modular
class Qwen2MoeFlashAttention2(Qwen2MoeAttention):
"""
Qwen2Moe flash attention module, following Qwen2Moe attention module. This module inherits from `Qwen2MoeAttention`
as the weights of the module stay untouched. The only required change would be on the forward pass
where it needs to correctly call the public API of flash attention and deal with padding tokens
in case the input contains any of them. Additionally, for sliding window attention, we apply SWA only to the bottom
config.max_window_layers layers.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
# flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
# Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
self._flash_attn_uses_top_left_mask = flash_attn_supports_top_left_mask()
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
):
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
# repeat k/v heads if n_kv_heads < n_heads
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
dropout_rate = 0.0 if not self.training else self.attention_dropout
# In PEFT, usually we cast the layer norms in float32 for training stability reasons
# therefore the input hidden states gets silently casted in float32. Hence, we need
# cast them back in float16 just to be sure everything works as expected.
input_dtype = query_states.dtype
device_type = query_states.device.type if query_states.device.type != "mps" else "cpu"
if input_dtype == torch.float32:
if torch.is_autocast_enabled():
target_dtype = (
torch.get_autocast_dtype(device_type)
if hasattr(torch, "get_autocast_dtype")
else torch.get_autocast_gpu_dtype()
)
# Handle the case where the model is quantized
elif hasattr(self.config, "_pre_quantization_dtype"):
target_dtype = self.config._pre_quantization_dtype
else:
target_dtype = self.q_proj.weight.dtype
logger.warning_once(
f"The input hidden states seems to be silently casted in float32, this might be related to"
f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
f" {target_dtype}."
)
query_states = query_states.to(target_dtype)
key_states = key_states.to(target_dtype)
value_states = value_states.to(target_dtype)
# Reshape to the expected shape for Flash Attention
query_states = query_states.transpose(1, 2)
key_states = key_states.transpose(1, 2)
value_states = value_states.transpose(1, 2)
if (
self.config.use_sliding_window
and getattr(self.config, "sliding_window", None) is not None
and self.layer_idx >= self.config.max_window_layers
):
sliding_window = self.config.sliding_window
else:
sliding_window = None
attn_output = _flash_attention_forward(
query_states,
key_states,
value_states,
attention_mask,
q_len,
position_ids=position_ids,
dropout=dropout_rate,
sliding_window=sliding_window,
is_causal=self.is_causal,
use_top_left_mask=self._flash_attn_uses_top_left_mask,
)
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
attn_output = self.o_proj(attn_output)
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
# NO LONGER EXIST Copied from transformers.models.qwen2.modeling_qwen2.Qwen2SdpaAttention with Qwen2->Qwen2Moe
# TODO cyril: modular
class Qwen2MoeSdpaAttention(Qwen2MoeAttention):
"""
Qwen2Moe attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
`Qwen2MoeAttention` as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
SDPA API.
"""
# Adapted from Qwen2MoeAttention.forward
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if output_attentions:
# TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
logger.warning_once(
"Qwen2MoeModel is using Qwen2MoeSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
)
return super().forward(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
query_states = query_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, -1, self.head_dim).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} # Specific to RoPE models
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
causal_mask = attention_mask
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
# SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
# Reference: https://github.com/pytorch/pytorch/issues/112577.
if query_states.device.type == "cuda" and attention_mask is not None:
query_states = query_states.contiguous()
key_states = key_states.contiguous()
value_states = value_states.contiguous()
# We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
# in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
# The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
is_causal = bool(causal_mask is None and q_len > 1)
attn_output = torch.nn.functional.scaled_dot_product_attention(
query_states,
key_states,
value_states,
attn_mask=causal_mask,
dropout_p=self.attention_dropout if self.training else 0.0,
is_causal=is_causal,
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, self.hidden_size)
attn_output = self.o_proj(attn_output)
return attn_output, None
QWEN2MOE_ATTENTION_CLASSES = {
"eager": Qwen2MoeAttention,
"flash_attention_2": Qwen2MoeFlashAttention2,
"sdpa": Qwen2MoeSdpaAttention,
}
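# Editor's note (hedged sketch): `Qwen2MoeDecoderLayer` below picks its attention implementation
# from this mapping via `config._attn_implementation`, e.g.
#     attention_class = QWEN2MOE_ATTENTION_CLASSES["sdpa"]   # -> Qwen2MoeSdpaAttention
#     self_attn = attention_class(config, layer_idx=0)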
class Qwen2MoeSparseMoeBlock(nn.Module):
def __init__(self, config):
super().__init__()
self.num_experts = config.num_experts
self.top_k = config.num_experts_per_tok
self.norm_topk_prob = config.norm_topk_prob
# gating
self.gate = nn.Linear(config.hidden_size, config.num_experts, bias=False)
self.experts = nn.ModuleList(
[Qwen2MoeMLP(config, intermediate_size=config.moe_intermediate_size) for _ in range(self.num_experts)]
)
self.shared_expert = Qwen2MoeMLP(config, intermediate_size=config.shared_expert_intermediate_size)
self.shared_expert_gate = torch.nn.Linear(config.hidden_size, 1, bias=False)
    def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Route each token to its top-k experts and combine the result with the gated shared-expert output."""
batch_size, sequence_length, hidden_dim = hidden_states.shape
hidden_states = hidden_states.view(-1, hidden_dim)
# router_logits: (batch * sequence_length, n_experts)
router_logits = self.gate(hidden_states)
routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1)
if self.norm_topk_prob:
routing_weights /= routing_weights.sum(dim=-1, keepdim=True)
# we cast back to the input dtype
routing_weights = routing_weights.to(hidden_states.dtype)
final_hidden_states = torch.zeros(
(batch_size * sequence_length, hidden_dim), dtype=hidden_states.dtype, device=hidden_states.device
)
# One hot encode the selected experts to create an expert mask
        # this will be used to easily index which expert is going to be solicited
expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0)
# Loop over all available experts in the model and perform the computation on each expert
expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
for expert_idx in expert_hit:
expert_layer = self.experts[expert_idx]
idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
# Index the correct hidden states and compute the expert hidden state for
# the current expert. We need to make sure to multiply the output hidden
# states by `routing_weights` on the corresponding tokens (top-1 and top-2)
current_state = hidden_states[None, top_x].reshape(-1, hidden_dim)
current_hidden_states = expert_layer(current_state) * routing_weights[top_x, idx, None]
            # However `index_add_` only supports torch tensors for indexing so we'll use
# the `top_x` tensor here.
final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
shared_expert_output = self.shared_expert(hidden_states)
shared_expert_output = F.sigmoid(self.shared_expert_gate(hidden_states)) * shared_expert_output
final_hidden_states = final_hidden_states + shared_expert_output
final_hidden_states = final_hidden_states.reshape(batch_size, sequence_length, hidden_dim)
return final_hidden_states, router_logits
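# Editor's note: a hedged shape walk-through of the routing above, using made-up sizes
# (batch=2, seq=3, hidden=8); none of these numbers come from the config:
#     hidden_states: (2, 3, 8) -> flattened to (6, 8)
#     router_logits = gate(hidden_states)                        -> (6, num_experts)
#     routing_weights, selected_experts = topk(softmax(logits))  -> both (6, top_k)
#     expert_mask = one_hot(selected_experts).permute(2, 1, 0)   -> (num_experts, top_k, 6)
# Each expert that received at least one token processes only its tokens; the result is scaled by
# the matching routing weight and scattered back with `index_add_`. Finally the shared expert runs
# on every token, gated by a sigmoid, and is added to the routed output.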
class Qwen2MoeDecoderLayer(GradientCheckpointingLayer):
def __init__(self, config: Qwen2MoeConfig, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.self_attn = QWEN2MOE_ATTENTION_CLASSES[config._attn_implementation](config, layer_idx)
if (layer_idx not in config.mlp_only_layers) and (
config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
):
self.mlp = Qwen2MoeSparseMoeBlock(config)
else:
self.mlp = Qwen2MoeMLP(config, intermediate_size=config.intermediate_size)
self.input_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
output_router_logits: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
output_router_logits (`bool`, *optional*):
Whether or not to return the logits of all the routers. They are useful for computing the router loss,
and should not be returned during inference.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
past_key_values (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
kwargs (`dict`, *optional*):
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
                into the model.
"""
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
if isinstance(hidden_states, tuple):
hidden_states, router_logits = hidden_states
else:
router_logits = None
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if output_router_logits:
outputs += (router_logits,)
return outputs
@auto_docstring
class Qwen2MoePreTrainedModel(PreTrainedModel):
config: Qwen2MoeConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Qwen2MoeDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
def _init_weights(self, module):
std = self.config.initializer_range
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, Qwen2MoeRMSNorm):
module.weight.data.fill_(1.0)
@auto_docstring
class Qwen2MoeModel(Qwen2MoePreTrainedModel):
def __init__(self, config: Qwen2MoeConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[Qwen2MoeDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self._attn_implementation = config._attn_implementation
self.norm = Qwen2MoeRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = Qwen2MoeRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> MoeModelOutputWithPast:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
# TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
if not isinstance(past_key_values, (type(None), Cache)):
raise ValueError("The `past_key_values` should be either a `Cache` object or `None`.")
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
)
hidden_states = inputs_embeds
# create position embeddings to be shared across the decoder layers
position_embeddings = self.rotary_emb(hidden_states, position_ids)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_router_logits = () if output_router_logits else None
for decoder_layer in self.layers:
if output_hidden_states:
all_hidden_states += (hidden_states,)
layer_outputs = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_ids=position_ids,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_router_logits=output_router_logits,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attns += (layer_outputs[1],)
if output_router_logits and layer_outputs[-1] is not None:
all_router_logits += (layer_outputs[-1],)
hidden_states = self.norm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
hidden_states=all_hidden_states,
attentions=all_self_attns,
router_logits=all_router_logits,
)
# Copied from transformers.models.phimoe.modeling_phimoe.PhimoeModel._update_causal_mask with Phimoe->Qwen2Moe
def _update_causal_mask(
self,
attention_mask: Union[torch.Tensor, "BlockMask"],
input_tensor: torch.Tensor,
cache_position: torch.Tensor,
past_key_values: Cache,
output_attentions: bool = False,
):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and past_key_values is not None:
is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right', which"
                        " may lead to unexpected behaviour for the Flash Attention version of Qwen2Moe. Make sure to"
                        " call `tokenizer.padding_side = 'left'` before tokenizing the input."
                    )
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
if self.config._attn_implementation == "flex_attention":
if isinstance(attention_mask, torch.Tensor):
attention_mask = make_flex_block_causal_mask(attention_mask)
return attention_mask
# For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
# order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
# to infer the attention mask.
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
using_static_cache = isinstance(past_key_values, StaticCache)
# When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
if AttentionMaskConverter._ignore_causal_mask_sdpa(
attention_mask,
inputs_embeds=input_tensor,
past_key_values_length=past_seen_tokens,
sliding_window=self.config.sliding_window,
is_training=self.training,
):
return None
dtype = input_tensor.dtype
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
# StaticCache
if using_static_cache:
target_length = past_key_values.get_max_cache_shape()
# DynamicCache or no cache
else:
target_length = (
attention_mask.shape[-1]
if isinstance(attention_mask, torch.Tensor)
else past_seen_tokens + sequence_length + 1
)
        # In case the provided `attention_mask` is 2D, we generate a causal mask here (4D).
causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
attention_mask,
sequence_length=sequence_length,
target_length=target_length,
dtype=dtype,
cache_position=cache_position,
batch_size=input_tensor.shape[0],
config=self.config,
past_key_values=past_key_values,
)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
and not output_attentions
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
@staticmethod
# Copied from transformers.models.phimoe.modeling_phimoe.PhimoeModel._prepare_4d_causal_attention_mask_with_cache_position with Phimoe->Qwen2Moe
def _prepare_4d_causal_attention_mask_with_cache_position(
attention_mask: torch.Tensor,
sequence_length: int,
target_length: int,
dtype: torch.dtype,
cache_position: torch.Tensor,
batch_size: int,
config: Qwen2MoeConfig,
past_key_values: Cache,
):
"""
Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
`(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
Args:
attention_mask (`torch.Tensor`):
A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
sequence_length (`int`):
The sequence length being processed.
target_length (`int`):
                The target length: when generating with a static cache, the mask should be as long as the static cache to account for the 0 padding (the part of the cache that is not filled yet).
dtype (`torch.dtype`):
The dtype to use for the 4D attention mask.
cache_position (`torch.Tensor`):
Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`int`):
Batch size.
config (`Qwen2MoeConfig`):
The model's configuration class
past_key_values (`Cache`):
The cache class that is being used currently to generate
"""
if attention_mask is not None and attention_mask.dim() == 4:
# In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
causal_mask = attention_mask
else:
min_dtype = torch.finfo(dtype).min
causal_mask = torch.full(
(sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
)
diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
-1, 1
)
text_config = config.get_text_config()
if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
                # if we have a sliding window, we should not attend to tokens beyond the sliding window length, so we mask them out as well
                # the check is needed to verify whether the current checkpoint was trained with a sliding window or not
is_static_sliding_cache = isinstance(past_key_values, StaticCache) and all(past_key_values.is_sliding)
if not is_static_sliding_cache or sequence_length > target_length:
sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
cache_position.reshape(-1, 1) - text_config.sliding_window
)
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
causal_mask *= diagonal_attend_mask
causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.shape[-1] > target_length:
attention_mask = attention_mask[:, :target_length]
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
causal_mask.device
)
padding_mask = padding_mask == 0
causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
padding_mask, min_dtype
)
return causal_mask
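    # Editor's note: a minimal worked example of the helper above (values assumed, not from this
    # file). With sequence_length=2, target_length=4, cache_position=[2, 3], no sliding window and
    # no 2D padding mask, the mask before batch expansion is
    #     [[0, 0, 0, min],
    #      [0, 0, 0, 0  ]]
    # where `min = torch.finfo(dtype).min`: each query may attend to every key position up to its
    # own cache position, and masked positions are filled with the most negative representable value.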
class Qwen2MoeForCausalLM(Qwen2MoePreTrainedModel, GenerationMixin):
_tied_weights_keys = ["lm_head.weight"]
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = Qwen2MoeModel(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.router_aux_loss_coef = config.router_aux_loss_coef
self.num_experts = config.num_experts
self.num_experts_per_tok = config.num_experts_per_tok
# Initialize weights and apply final processing
self.post_init()
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
output_router_logits: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> MoeCausalLMOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Qwen2MoeForCausalLM
>>> model = Qwen2MoeForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)
>>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_router_logits = (
output_router_logits if output_router_logits is not None else self.config.output_router_logits
)
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
output_router_logits=output_router_logits,
cache_position=cache_position,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
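    # Editor's note (hedged): when `output_router_logits=True` and `labels` are provided, the total
    # loss above is `lm_loss + router_aux_loss_coef * aux_loss`, where `load_balancing_loss_func`
    # implements the Switch-Transformers-style auxiliary loss that pushes the router towards a
    # uniform distribution of tokens over the experts.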
class Qwen2MoeForSequenceClassification(GenericForSequenceClassification, Qwen2MoePreTrainedModel): ...
class Qwen2MoeForTokenClassification(GenericForTokenClassification, Qwen2MoePreTrainedModel): ...
class Qwen2MoeForQuestionAnswering(GenericForQuestionAnswering, Qwen2MoePreTrainedModel): ...
__all__ = [
"Qwen2MoeForCausalLM",
"Qwen2MoeForQuestionAnswering",
"Qwen2MoeModel",
"Qwen2MoePreTrainedModel",
"Qwen2MoeForSequenceClassification",
"Qwen2MoeForTokenClassification",
]
| transformers/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py/0 | {
"file_path": "transformers/src/transformers/models/qwen2_moe/modeling_qwen2_moe.py",
"repo_id": "transformers",
"token_count": 23537
} | 502 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ResNet checkpoints from timm."""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Optional
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
module: nn.Module
traced: list[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d))
if has_not_submodules:
self.traced.append(m)
def __call__(self, x: Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(x)
[x.remove() for x in self.handles]
return self
@property
def parametrized(self):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
src: nn.Module
dest: nn.Module
verbose: int = 0
src_skip: list = field(default_factory=list)
dest_skip: list = field(default_factory=list)
def __call__(self, x: Tensor):
"""
Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
hood we tracked all the operations in both modules.
"""
dest_traced = Tracker(self.dest)(x).parametrized
src_traced = Tracker(self.src)(x).parametrized
src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
if len(dest_traced) != len(src_traced):
raise Exception(
f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
f" destination module has {len(dest_traced)}."
)
for dest_m, src_m in zip(dest_traced, src_traced):
dest_m.load_state_dict(src_m.state_dict())
if self.verbose == 1:
print(f"Transferred from={src_m} to={dest_m}")
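# Editor's note: a minimal, hedged usage sketch of the transfer machinery above (model name and
# config values are assumptions for illustration, mirroring `convert_weight_and_push` below):
#     config = ResNetConfig(
#         depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck", num_labels=1000
#     )
#     src = timm.create_model("resnet50", pretrained=True).eval()
#     dest = ResNetForImageClassification(config).eval()
#     ModuleTransfer(src=src, dest=dest)(torch.randn(1, 3, 224, 224))
# The forward pass lets `Tracker` record every parametrized leaf module in both models in execution
# order, and the weights are then copied pairwise, so this only works when both traces visit
# equivalent layers in the same order.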
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
print(f"Converting {name}...")
with torch.no_grad():
from_model = timm.create_model(name, pretrained=True).eval()
our_model = ResNetForImageClassification(config).eval()
module_transfer = ModuleTransfer(src=from_model, dest=our_model)
x = torch.randn((1, 3, 224, 224))
module_transfer(x)
assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
print(checkpoint_name)
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name,
commit_message="Add model",
use_temp_dir=True,
)
# we can use the convnext one
image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name,
commit_message="Add image processor",
use_temp_dir=True,
)
print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True):
filename = "imagenet-1k-id2label.json"
num_labels = 1000
expected_shape = (1, num_labels)
repo_id = "huggingface/label-files"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
),
}
if model_name:
convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
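# Editor's note: a hedged command-line example (paths are placeholders):
#     python convert_resnet_to_pytorch.py --model_name resnet50 --pytorch_dump_folder_path ./converted
# Because `--push_to_hub` is declared with `type=bool`, any non-empty string (even "False") parses
# as True; pass an empty string (`--push_to_hub ""`) if you want to skip pushing.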
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
        help=(
            "The name of the model you wish to convert. It must be one of the supported resnet* architectures,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted."
        ),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
args = parser.parse_args()
pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| transformers/src/transformers/models/resnet/convert_resnet_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/resnet/convert_resnet_to_pytorch.py",
"repo_id": "transformers",
"token_count": 2993
} | 503 |
# coding=utf-8
# Copyright 2023 Bo Peng and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch RWKV model."""
import math
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...generation import GenerationMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_utils import PreTrainedModel
from ...utils import (
ModelOutput,
auto_docstring,
is_bitsandbytes_available,
is_ninja_available,
is_torch_cuda_available,
logging,
)
from .configuration_rwkv import RwkvConfig
logger = logging.get_logger(__name__)
rwkv_cuda_kernel = None
def load_wkv_cuda_kernel(context_length):
from torch.utils.cpp_extension import load as load_kernel
global rwkv_cuda_kernel
kernel_folder = Path(__file__).resolve().parent.parent.parent / "kernels" / "rwkv"
cuda_kernel_files = [kernel_folder / f for f in ["wkv_op.cpp", "wkv_cuda.cu", "wkv_cuda_bf16.cu"]]
# Only load the kernel if it's not been loaded yet or if we changed the context length
if rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == context_length:
return
logger.info(f"Loading CUDA kernel for RWKV at context length of {context_length}.")
flags = [
"-res-usage",
"--maxrregcount 60",
"--use_fast_math",
"-O3",
"-Xptxas -O3",
"--extra-device-vectorization",
f"-DTmax={context_length}",
]
rwkv_cuda_kernel = load_kernel(
name=f"wkv_{context_length}",
sources=cuda_kernel_files,
verbose=(logging.get_verbosity() == logging.DEBUG),
extra_cuda_cflags=flags,
)
rwkv_cuda_kernel.max_seq_length = context_length
class RwkvLinearAttention(torch.autograd.Function):
@staticmethod
def forward(ctx, time_decay, time_first, key, value, state=None, return_state=False):
batch_size, seq_len, hidden_size = key.size()
if seq_len > rwkv_cuda_kernel.max_seq_length:
raise ValueError(
f"Cannot process a batch with {seq_len} tokens at the same time, use a maximum of "
f"{rwkv_cuda_kernel.max_seq_length} with this model."
)
if batch_size * hidden_size % min(hidden_size, 32) != 0:
raise ValueError(
f"The product of batch size ({batch_size}) and hidden size ({hidden_size}) needs to be a round "
f"multiple of {min(hidden_size, 32)}."
)
ctx.input_dtype = key.dtype
if (
time_decay.device.type != "cuda"
or time_first.device.type != "cuda"
or key.device.type != "cuda"
or value.device.type != "cuda"
):
raise ValueError("Calling the CUDA kernel for wkv attention requires all tensors to be on CUDA devices.")
time_decay = -torch.exp(time_decay.float().contiguous())
if key.dtype == torch.float16:
time_first = time_first.float()
key = key.float()
value = value.float()
time_first = time_first.contiguous()
key = key.contiguous()
value = value.contiguous()
# The CUDA kernel will fill this tensor.
output = torch.empty_like(key, memory_format=torch.contiguous_format)
if return_state or state is not None:
if state is None:
state = torch.zeros(
batch_size,
hidden_size,
3,
dtype=torch.float32,
device=key.device,
memory_format=torch.contiguous_format,
)
state[:, :, 2] -= 1e38
else:
state = torch.cat([s.unsqueeze(2) for s in state], dim=2).contiguous()
if key.dtype == torch.bfloat16:
forward_func = rwkv_cuda_kernel.forward_with_state_bf16
else:
forward_func = rwkv_cuda_kernel.forward_with_state
forward_func(time_decay, time_first, key, value, output, state)
else:
forward_func = rwkv_cuda_kernel.forward_bf16 if key.dtype == torch.bfloat16 else rwkv_cuda_kernel.forward
forward_func(time_decay, time_first, key, value, output)
ctx.save_for_backward(time_decay, time_first, key, value, output)
if state is not None:
state = [s.squeeze(2) for s in torch.chunk(state, 3, dim=2)]
return output.to(ctx.input_dtype), state
@staticmethod
# g stands for grad
def backward(ctx, g_output, g_state=None):
input_dtype = ctx.input_dtype
time_decay, time_first, key, value, output = ctx.saved_tensors
# The CUDA kernel will fill those tensors.
g_time_decay = torch.empty_like(
time_decay,
memory_format=torch.contiguous_format,
dtype=torch.bfloat16 if input_dtype == torch.bfloat16 else torch.float32,
)
g_time_first = torch.empty_like(time_first, memory_format=torch.contiguous_format)
g_key = torch.empty_like(key, memory_format=torch.contiguous_format)
g_value = torch.empty_like(value, memory_format=torch.contiguous_format)
if input_dtype == torch.float16:
g_output = g_output.float()
backward_func = rwkv_cuda_kernel.backward_bf16 if input_dtype == torch.bfloat16 else rwkv_cuda_kernel.backward
backward_func(
time_decay,
time_first,
key,
value,
output,
g_output.contiguous(),
g_time_decay,
g_time_first,
g_key,
g_value,
)
return (
g_time_decay.to(input_dtype),
g_time_first.to(input_dtype),
g_key.to(input_dtype),
g_value.to(input_dtype),
None,
None,
)
def rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=None, return_state=False):
# For CPU fallback. Will be slower and probably take more memory than the custom CUDA kernel if not executed
# within a torch.no_grad.
_, seq_length, _ = key.size()
output = torch.zeros_like(key)
if state is None:
num_state = torch.zeros_like(key[:, 0], dtype=torch.float32)
den_state = torch.zeros_like(key[:, 0], dtype=torch.float32)
max_state = torch.zeros_like(key[:, 0], dtype=torch.float32) - 1e38
else:
num_state, den_state, max_state = state
# For numerical stability
# real_numerator_state = num_state * torch.exp(max_state)
# real_denominator_state = den_state * torch.exp(max_state)
time_decay = -torch.exp(time_decay)
for current_index in range(seq_length):
current_key = key[:, current_index].float()
current_value = value[:, current_index]
# wkv computation at time t
max_for_output = torch.maximum(max_state, current_key + time_first)
e1 = torch.exp(max_state - max_for_output)
e2 = torch.exp(current_key + time_first - max_for_output)
numerator = e1 * num_state + e2 * current_value
denominator = e1 * den_state + e2
output[:, current_index] = (numerator / denominator).to(output.dtype)
# Update state for next iteration
max_for_state = torch.maximum(max_state + time_decay, current_key)
e1 = torch.exp(max_state + time_decay - max_for_state)
e2 = torch.exp(current_key - max_for_state)
num_state = e1 * num_state + e2 * current_value
den_state = e1 * den_state + e2
max_state = max_for_state
if return_state or state is not None:
state = [num_state, den_state, max_state]
return output, state
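# Editor's note (hedged): stripped of the running-max stabilisation, the loop above computes, per
# channel, with w the raw `time_decay` parameter and u = `time_first`:
#     out_t = (num_{t-1} + exp(u + k_t) * v_t) / (den_{t-1} + exp(u + k_t))
#     num_t = exp(-exp(w)) * num_{t-1} + exp(k_t) * v_t
#     den_t = exp(-exp(w)) * den_{t-1} + exp(k_t)
# `max_state` keeps track of the largest exponent seen so far so that the `exp` calls stay in a
# safe numerical range; the returned state is [num_state, den_state, max_state].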
def rwkv_linear_attention(time_decay, time_first, key, value, state=None, return_state=False):
no_cuda = any(t.device.type != "cuda" for t in [time_decay, time_first, key, value])
# Launching the CUDA kernel for just one token will actually be slower (there is no for loop in the CPU version
# in this case).
one_token = key.size(1) == 1
if rwkv_cuda_kernel is None or no_cuda or one_token:
return rwkv_linear_attention_cpu(time_decay, time_first, key, value, state=state, return_state=return_state)
else:
return RwkvLinearAttention.apply(time_decay, time_first, key, value, state, return_state)
class RwkvSelfAttention(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.config = config
kernel_loaded = rwkv_cuda_kernel is not None and rwkv_cuda_kernel.max_seq_length == config.context_length
if is_ninja_available() and is_torch_cuda_available() and not kernel_loaded:
try:
load_wkv_cuda_kernel(config.context_length)
except Exception:
logger.info("Could not load the custom CUDA kernel for RWKV attention.")
self.layer_id = layer_id
hidden_size = config.hidden_size
attention_hidden_size = (
config.attention_hidden_size if config.attention_hidden_size is not None else hidden_size
)
self.attention_hidden_size = attention_hidden_size
self.time_decay = nn.Parameter(torch.empty(attention_hidden_size))
self.time_first = nn.Parameter(torch.empty(attention_hidden_size))
self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))
self.time_mix_value = nn.Parameter(torch.empty(1, 1, hidden_size))
self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.key = nn.Linear(hidden_size, attention_hidden_size, bias=False)
self.value = nn.Linear(hidden_size, attention_hidden_size, bias=False)
self.receptance = nn.Linear(hidden_size, attention_hidden_size, bias=False)
self.output = nn.Linear(attention_hidden_size, hidden_size, bias=False)
# TODO: maybe jit, otherwise move inside forward
def extract_key_value(self, hidden, state=None):
# Mix hidden with the previous timestep to produce key, value, receptance
if hidden.size(1) == 1 and state is not None:
shifted = state[1][:, :, self.layer_id]
else:
shifted = self.time_shift(hidden)
if state is not None:
shifted[:, 0] = state[1][:, :, self.layer_id]
key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)
value = hidden * self.time_mix_value + shifted * (1 - self.time_mix_value)
receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)
key = self.key(key)
value = self.value(value)
receptance = torch.sigmoid(self.receptance(receptance))
if state is not None:
state[1][:, :, self.layer_id] = hidden[:, -1]
return receptance, key, value, state
def forward(self, hidden, state=None, use_cache=False):
receptance, key, value, state = self.extract_key_value(hidden, state=state)
layer_state = tuple(s[:, :, self.layer_id] for s in state[2:]) if state is not None else None
rwkv, layer_state = rwkv_linear_attention(
self.time_decay,
self.time_first,
key,
value,
state=layer_state,
return_state=use_cache,
)
if layer_state is not None:
state[2][:, :, self.layer_id] = layer_state[0]
state[3][:, :, self.layer_id] = layer_state[1]
state[4][:, :, self.layer_id] = layer_state[2]
return self.output(receptance * rwkv), state
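# Editor's note (hedged reading of the code above and of `RwkvModel.forward`): the cached `state`
# is a list of five tensors, each of shape (batch_size, hidden_size, num_hidden_layers):
#     state[0]  previous hidden state for the feed-forward time-shift
#     state[1]  previous hidden state for the attention time-shift
#     state[2], state[3], state[4]  numerator, denominator and running max of the WKV recurrence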
class RwkvFeedForward(nn.Module):
def __init__(self, config, layer_id=0):
super().__init__()
self.config = config
self.layer_id = layer_id
hidden_size = config.hidden_size
intermediate_size = (
config.intermediate_size if config.intermediate_size is not None else 4 * config.hidden_size
)
self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))
self.time_mix_key = nn.Parameter(torch.empty(1, 1, hidden_size))
self.time_mix_receptance = nn.Parameter(torch.empty(1, 1, hidden_size))
self.key = nn.Linear(hidden_size, intermediate_size, bias=False)
self.receptance = nn.Linear(hidden_size, hidden_size, bias=False)
self.value = nn.Linear(intermediate_size, hidden_size, bias=False)
def forward(self, hidden, state=None):
if hidden.size(1) == 1 and state is not None:
shifted = state[0][:, :, self.layer_id]
else:
shifted = self.time_shift(hidden)
if state is not None:
shifted[:, 0] = state[0][:, :, self.layer_id]
key = hidden * self.time_mix_key + shifted * (1 - self.time_mix_key)
receptance = hidden * self.time_mix_receptance + shifted * (1 - self.time_mix_receptance)
key = torch.square(torch.relu(self.key(key)))
value = self.value(key)
receptance = torch.sigmoid(self.receptance(receptance))
if state is not None:
state[0][:, :, self.layer_id] = hidden[:, -1]
return receptance * value, state
class RwkvBlock(GradientCheckpointingLayer):
def __init__(self, config, layer_id):
super().__init__()
self.config = config
self.layer_id = layer_id
if layer_id == 0:
self.pre_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.ln1 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.ln2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_epsilon)
self.attention = RwkvSelfAttention(config, layer_id)
self.feed_forward = RwkvFeedForward(config, layer_id)
def forward(self, hidden, state=None, use_cache=False, output_attentions=False):
if self.layer_id == 0:
hidden = self.pre_ln(hidden)
attention, state = self.attention(self.ln1(hidden), state=state, use_cache=use_cache)
hidden = hidden + attention
feed_forward, state = self.feed_forward(self.ln2(hidden), state=state)
hidden = hidden + feed_forward
outputs = (hidden, state)
if output_attentions:
outputs += (attention,)
else:
outputs += (None,)
return outputs
@auto_docstring
class RwkvPreTrainedModel(PreTrainedModel):
config: RwkvConfig
base_model_prefix = "rwkv"
_no_split_modules = ["RwkvBlock"]
_keep_in_fp32_modules = ["time_decay", "time_first"]
supports_gradient_checkpointing = True
_is_stateful = True
def _init_weights(self, module: nn.Module):
"""Initialize the weights."""
if isinstance(module, RwkvSelfAttention):
layer_id = module.layer_id
num_hidden_layers = module.config.num_hidden_layers
hidden_size = module.config.hidden_size
attention_hidden_size = module.attention_hidden_size
ratio_0_to_1 = layer_id / (num_hidden_layers - 1) # 0 to 1
ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0
time_weight = torch.tensor(
[i / hidden_size for i in range(hidden_size)],
dtype=module.time_mix_key.dtype,
device=module.time_mix_key.device,
)
time_weight = time_weight[None, None, :]
decay_speed = [
-5 + 8 * (h / (attention_hidden_size - 1)) ** (0.7 + 1.3 * ratio_0_to_1)
for h in range(attention_hidden_size)
]
decay_speed = torch.tensor(decay_speed, dtype=module.time_decay.dtype, device=module.time_decay.device)
zigzag = (
torch.tensor(
[(i + 1) % 3 - 1 for i in range(attention_hidden_size)],
dtype=module.time_first.dtype,
device=module.time_first.device,
)
* 0.5
)
module.time_decay.data = decay_speed
            module.time_first.data = torch.ones_like(module.time_first) * math.log(0.3) + zigzag
module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)
module.time_mix_value.data = torch.pow(time_weight, ratio_1_to_almost0) + 0.3 * ratio_0_to_1
module.time_mix_receptance.data = torch.pow(time_weight, 0.5 * ratio_1_to_almost0)
elif isinstance(module, RwkvFeedForward):
layer_id = module.layer_id
num_hidden_layers = module.config.num_hidden_layers
hidden_size = module.config.hidden_size
ratio_1_to_almost0 = 1.0 - (layer_id / num_hidden_layers) # 1 to ~0
time_weight = torch.tensor(
[i / hidden_size for i in range(hidden_size)],
dtype=module.time_mix_key.dtype,
device=module.time_mix_key.device,
)
time_weight = time_weight[None, None, :]
module.time_mix_key.data = torch.pow(time_weight, ratio_1_to_almost0)
module.time_mix_receptance.data = torch.pow(time_weight, ratio_1_to_almost0)
elif isinstance(module, nn.Linear):
shape = module.weight.data.shape
gain = 1.0
scale = 1.0 # extra scale for gain
if module.bias is not None:
module.bias.data.zero_()
if shape[0] > shape[1]:
gain = math.sqrt(shape[0] / shape[1])
if shape[0] == self.config.vocab_size and shape[1] == self.config.hidden_size: # final projection?
scale = 0.5
gain *= scale
nn.init.orthogonal_(module.weight, gain=gain)
elif isinstance(module, nn.Embedding):
shape = module.weight.data.shape
gain = 1e-4 * math.sqrt(max(shape[0], shape[1]))
nn.init.orthogonal_(module.weight, gain=gain)
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
@dataclass
@auto_docstring(
custom_intro="""
Class for the RWKV model outputs.
"""
)
class RwkvOutput(ModelOutput):
r"""
state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
state: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for causal language model (or autoregressive) outputs.
"""
)
class RwkvCausalLMOutput(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
state (list of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`):
The state of the model at the last time step. Can be used in a forward method with the next `input_ids` to
avoid providing the old `input_ids`.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
state: Optional[list[torch.FloatTensor]] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@auto_docstring
class RwkvModel(RwkvPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.blocks = nn.ModuleList([RwkvBlock(config, layer_id=idx) for idx in range(config.num_hidden_layers)])
self.ln_out = nn.LayerNorm(config.hidden_size)
self.layers_are_rescaled = False
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, new_embeddings):
self.embeddings = new_embeddings
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None, # noqa
inputs_embeds: Optional[torch.FloatTensor] = None,
state: Optional[list[torch.FloatTensor]] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, RwkvOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
            `input_ids` provided as if the model added `state_input_ids + input_ids` as context).
use_cache (`bool`, *optional*):
If set to `True`, the last state is returned and can be used to quickly generate the next logits.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else (self.config.use_cache if not self.training else False)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if attention_mask is not None:
logger.warning_once("`attention_mask` was passed, but it is unused in this model.")
if self.training == self.layers_are_rescaled:
self._rescale_layers()
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is None and inputs_embeds is None:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embeddings(input_ids)
if use_cache and state is None:
shape = (inputs_embeds.size(0), self.config.hidden_size, self.config.num_hidden_layers)
state = [
torch.zeros(
*shape, dtype=inputs_embeds.dtype if i <= 1 else torch.float32, device=inputs_embeds.device
)
for i in range(5)
]
state[4] -= 1e30
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
hidden_states = inputs_embeds
all_self_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for idx, block in enumerate(self.blocks):
hidden_states, state, attentions = block(
hidden_states, state=state, use_cache=use_cache, output_attentions=output_attentions
)
if (
self.layers_are_rescaled
and self.config.rescale_every > 0
and (idx + 1) % self.config.rescale_every == 0
):
hidden_states = hidden_states / 2
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if output_attentions:
all_self_attentions = all_self_attentions + (attentions,)
hidden_states = self.ln_out(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(x for x in [hidden_states, state, all_hidden_states, all_self_attentions] if x is not None)
return RwkvOutput(
last_hidden_state=hidden_states,
state=state,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
def _rescale_layers(self):
# Layers should be rescaled for inference only.
if self.layers_are_rescaled == (not self.training):
return
if self.config.rescale_every > 0:
with torch.no_grad():
for block_id, block in enumerate(self.blocks):
if self.training:
block.attention.output.weight.mul_(2 ** int(block_id // self.config.rescale_every))
block.feed_forward.value.weight.mul_(2 ** int(block_id // self.config.rescale_every))
else:
# Deal with quantization statistics
if hasattr(block.attention.output.weight, "SCB"):
block.attention.output.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
block.feed_forward.value.weight.SCB.div_(2 ** int(block_id // self.config.rescale_every))
elif hasattr(block.attention.output.weight, "quant_state"):
self._bnb_4bit_dequantize_and_rescale(block.attention.output, block_id)
self._bnb_4bit_dequantize_and_rescale(block.feed_forward.value, block_id)
else:
block.attention.output.weight.div_(2 ** int(block_id // self.config.rescale_every))
block.feed_forward.value.weight.div_(2 ** int(block_id // self.config.rescale_every))
self.layers_are_rescaled = not self.training
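    # Editor's note (hedged): for inference, block i's attention output and feed-forward value
    # weights are divided by 2 ** (i // rescale_every) so that the growing residual stream stays in
    # float16 range; `forward` compensates by halving `hidden_states` after every `rescale_every`
    # blocks. Switching back to training multiplies the weights back up again.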
def _bnb_4bit_dequantize_and_rescale(self, target_layer, block_id):
r"""
Perform the dequantization and rescaling of the weights of a given layer. After that operation the layer will
be quantized again.
"""
if not is_bitsandbytes_available():
raise ImportError("Please install bitsandbytes to use this method.")
import bitsandbytes as bnb
dequant_weights = bnb.functional.dequantize_4bit(target_layer.weight.data, target_layer.weight.quant_state)
dequant_weights.div_(2 ** int(block_id // self.config.rescale_every))
# re-quantize the model:
# we need to put it first on CPU then back to the device
# this will create an overhead :/
# We set requires_grad=False as we cannot compute gradients on top of 4bit parameters anyway and to avoid
# bugs with bnb
quant_weight = bnb.nn.Params4bit(dequant_weights.to("cpu"), requires_grad=False).to(dequant_weights.device)
setattr(target_layer, "weight", quant_weight)
@auto_docstring(
custom_intro="""
The RWKV Model transformer with a language modeling head on top (linear layer with weights tied to the input
embeddings).
"""
)
class RwkvForCausalLM(RwkvPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["head.weight"]
def __init__(self, config):
super().__init__(config)
self.rwkv = RwkvModel(config)
self.head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.head
def set_output_embeddings(self, new_embeddings):
self.head = new_embeddings
def prepare_inputs_for_generation(self, input_ids, state=None, inputs_embeds=None, use_cache=None, **kwargs):
# Overwritten -- this model uses `state`, but doesn't have a cache (`past_key_values`)
# only last token for inputs_ids if the state is passed along.
if state is not None:
input_ids = input_ids[:, -1].unsqueeze(-1)
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and state is None:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids}
model_inputs["state"] = state
model_inputs["use_cache"] = use_cache
return model_inputs
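    # Editor's note: a minimal, hedged generation sketch (checkpoint name is an assumption):
    #     tokenizer = AutoTokenizer.from_pretrained("RWKV/rwkv-4-169m-pile")
    #     model = RwkvForCausalLM.from_pretrained("RWKV/rwkv-4-169m-pile")
    #     inputs = tokenizer("Hello", return_tensors="pt")
    #     out = model.generate(**inputs, max_new_tokens=20)
    # Once a `state` exists, only the last generated token is fed back in, since the recurrent
    # state already summarises the whole prefix.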
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.LongTensor] = None, # noqa
inputs_embeds: Optional[torch.FloatTensor] = None,
state: Optional[list[torch.FloatTensor]] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
**kwargs,
) -> Union[tuple, RwkvCausalLMOutput]:
r"""
input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past_key_values` is `None` else
`past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
sequence tokens in the vocabulary.
If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
`input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
state (tuple of five `torch.FloatTensor` of shape `(batch_size, hidden_size, num_hidden_layers)`, *optional*):
If passed along, the model uses the previous state in all the blocks (which will give the output for the
`input_ids` provided as if the model had seen `state_input_ids + input_ids` as context).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
`labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
use_cache (`bool`, *optional*):
If set to `True`, the last state is returned and can be used to quickly generate the next logits.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
rwkv_outputs = self.rwkv(
input_ids,
inputs_embeds=inputs_embeds,
state=state,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = rwkv_outputs[0]
logits = self.head(hidden_states)
loss = None
if labels is not None:
loss = self.loss_function(
logits,
labels,
vocab_size=self.config.vocab_size,
**kwargs,
)
if not return_dict:
output = (logits,) + rwkv_outputs[1:]
return ((loss,) + output) if loss is not None else output
return RwkvCausalLMOutput(
loss=loss,
logits=logits,
state=rwkv_outputs.state,
hidden_states=rwkv_outputs.hidden_states,
attentions=rwkv_outputs.attentions,
)
__all__ = ["RwkvForCausalLM", "RwkvModel", "RwkvPreTrainedModel"]
| transformers/src/transformers/models/rwkv/modeling_rwkv.py/0 | {
"file_path": "transformers/src/transformers/models/rwkv/modeling_rwkv.py",
"repo_id": "transformers",
"token_count": 14885
} | 504 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Feature extractor class for SeamlessM4T
"""
from typing import Optional, Union
import numpy as np
from ...utils import is_torch_available
if is_torch_available():
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SeamlessM4TFeatureExtractor(SequenceFeatureExtractor):
r"""
Constructs a SeamlessM4T feature extractor.
This feature extractor inherits from [`SequenceFeatureExtractor`] which contains most of the main methods. Users
should refer to this superclass for more information regarding those methods.
This class extracts mel-filter bank features from raw speech.
Args:
feature_size (`int`, *optional*, defaults to 80):
The feature dimension of the extracted features.
sampling_rate (`int`, *optional*, defaults to 16000):
The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
num_mel_bins (`int`, *optional*, defaults to 80):
Number of Mel-frequency bins.
padding_value (`float`, *optional*, defaults to 0.0):
The value that is used to fill the padding vectors.
stride (`int`, *optional*, defaults to 2):
Stride used to reshape the extracted features from shape `(batch_size, num_frames, num_mel_bins)` to
`(batch_size, num_frames // stride, num_mel_bins * stride)`.
"""
model_input_names = ["input_features", "attention_mask"]
def __init__(
self,
feature_size=80,
sampling_rate=16000,
num_mel_bins=80,
padding_value=0.0,
stride=2,
**kwargs,
):
self.num_mel_bins = num_mel_bins
self.return_attention_mask = True
self.stride = stride
mel_filters = mel_filter_bank(
num_frequency_bins=257,
num_mel_filters=self.num_mel_bins,
min_frequency=20,
max_frequency=sampling_rate // 2,
sampling_rate=sampling_rate,
norm=None,
mel_scale="kaldi",
triangularize_in_mel_space=True,
)
self.mel_filters = mel_filters
self.window = window_function(400, "povey", periodic=False)
super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def zero_mean_unit_var_norm(
input_values: list[np.ndarray], attention_mask: list[np.ndarray], padding_value: float = 0.0
) -> list[np.ndarray]:
"""
Every array in the list is normalized to have zero mean and unit variance
"""
if attention_mask is not None:
attention_mask = np.array(attention_mask, np.int32)
normed_input_values = []
for vector, length in zip(input_values, attention_mask.sum(-1)):
normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
normed_slice[length:] = padding_value
normed_input_values.append(normed_slice)
else:
normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def _extract_fbank_features(
self,
waveform: np.ndarray,
) -> np.ndarray:
"""
Get Kaldi-style mel-filter bank features. The Kaldi convention operates on 16-bit signed integer
amplitudes, hence the waveform should not be normalized before feature extraction (it is scaled by
`2**15` below instead).
"""
# by default, it extracts the left channel if stereo
if len(waveform.shape) == 2:
waveform = waveform[0]
waveform = np.squeeze(waveform) * (2**15) # Kaldi compliance: 16-bit signed integers
features = spectrogram(
waveform,
self.window,
frame_length=400,
hop_length=160,
fft_length=512,
power=2.0,
center=False,
preemphasis=0.97,
mel_filters=self.mel_filters,
log_mel="log",
mel_floor=1.192092955078125e-07,
remove_dc_offset=True,
).T
return features
def __call__(
self,
raw_speech: Union[np.ndarray, list[float], list[np.ndarray], list[list[float]]],
padding: Union[bool, str, PaddingStrategy] = True,
pad_to_multiple_of: Optional[int] = 2,
max_length: Optional[int] = None,
truncation: bool = False,
return_tensors: Optional[Union[str, TensorType]] = None,
sampling_rate: Optional[int] = None,
return_attention_mask: Optional[bool] = None,
do_normalize_per_mel_bins: Optional[bool] = True,
**kwargs,
) -> BatchFeature:
"""
Main method to featurize and prepare for the model one or several sequence(s).
Args:
raw_speech (`np.ndarray`, `torch.Tensor`, `list[float]`, `list[np.ndarray]`, `list[torch.Tensor]`,
`list[list[float]]`, `list[list[list[float]]]`):
The sequence or batch of sequences to be padded. Each sequence can be a numpy array,
a torch tensor, a list of float values, a list of numpy arrays, a list of torch tensors,
a list of list of float values or a list of a list of list of float values.
If `raw_speech` is a one-dimensional `np.ndarray`, `torch.Tensor` or a `list[float]`, `raw_speech` is
considered a single-channel, single-sample sound. In all other cases, the first dimension of
`raw_speech`, whether from an `np.ndarray`, a `torch.Tensor` or a `list[...]`,
corresponds to the number of samples in the batch, and the number of channels
(i.e. mono or stereo character) is derived from the other dimensions
(1D -> single-channel waveform batches; 2D -> stereo-channel waveform batches).
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
lengths).
pad_to_multiple_of (`int`, *optional*, defaults to 2):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
`>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`):
Activates truncation to cut input sequences longer than *max_length* to *max_length*.
return_attention_mask (`bool`, *optional*):
Whether to return the attention mask. If left to the default, will return the attention mask according
to the specific feature_extractor's default.
[What are attention masks?](../glossary#attention-mask)
<Tip>
For SeamlessM4T models, `attention_mask` should always be passed for batched inference, to avoid subtle
bugs.
</Tip>
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
sampling_rate (`int`, *optional*):
The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
`sampling_rate` at the forward call to prevent silent errors.
do_normalize_per_mel_bins (`bool`, *optional*, defaults to `True`):
Whether or not to zero-mean unit-variance normalize the input per mel-channel.
kwargs (*optional*):
Remaining dictionary of keyword arguments that will be passed to the tokenizer or the feature
extractor.
"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}."
)
else:
logger.warning(
f"It is strongly recommended to pass the `sampling_rate` argument to `{self.__class__.__name__}()`. "
"Failing to do so can result in silent errors that might be hard to debug."
)
return_attention_mask = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 3:
raise ValueError(f"Only mono-channel or stereo-channel audio is supported for input to {self}")
acceptable_types = (
(torch.Tensor, np.ndarray, tuple, list) if is_torch_available() else (np.ndarray, tuple, list)
)
is_batched = is_batched_numpy or (
isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], acceptable_types))
)
if is_batched:
raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech, np.ndarray):
raw_speech = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
raw_speech = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
raw_speech = [raw_speech]
# extract fbank features
features = [self._extract_fbank_features(waveform) for waveform in raw_speech]
if do_normalize_per_mel_bins:
# torch defaults to ddof=1, and numpy defaults to ddof=0
features = [
(x - np.expand_dims(x.mean(0), 0)) / np.sqrt(np.expand_dims(x.var(0, ddof=1), 0) + 1e-7)
for x in features
]
# convert into correct format for padding
encoded_inputs = BatchFeature({"input_features": features})
padded_inputs = self.pad(
encoded_inputs,
padding=padding,
max_length=max_length,
truncation=truncation,
pad_to_multiple_of=pad_to_multiple_of,
return_attention_mask=True,
return_tensors="np",
)
# SeamlessM4T needs to process extracted features
input_features = padded_inputs.get("input_features")
attention_mask = padded_inputs.pop("attention_mask")
batch_size, num_frames, num_channels = input_features.shape
remainder = num_frames % self.stride
if remainder != 0:
input_features = input_features[:, : num_frames - remainder, :]
attention_mask = attention_mask[:, : num_frames - remainder]
input_features = np.reshape(
input_features, (batch_size, num_frames // self.stride, num_channels * self.stride)
)
indices = np.arange(0, num_frames - remainder)
attention_mask = attention_mask[:, indices % self.stride == 1]
padded_inputs["input_features"] = input_features
if return_attention_mask:
padded_inputs["attention_mask"] = attention_mask
if return_tensors is not None:
padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
return padded_inputs
__all__ = ["SeamlessM4TFeatureExtractor"]
| transformers/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py/0 | {
"file_path": "transformers/src/transformers/models/seamless_m4t/feature_extraction_seamless_m4t.py",
"repo_id": "transformers",
"token_count": 5765
} | 505 |
# coding=utf-8
# Copyright 2021 ASAPP Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SEW model."""
import math
import warnings
from typing import Optional, Union
import torch
import torch.utils.checkpoint
from torch import nn
from ...activations import ACT2FN
from ...integrations.deepspeed import is_deepspeed_zero3_enabled
from ...integrations.fsdp import is_fsdp_managed_module
from ...modeling_outputs import BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring
from ..wav2vec2.modeling_wav2vec2 import (
Wav2Vec2Attention,
Wav2Vec2EncoderLayer,
Wav2Vec2FeatureEncoder,
Wav2Vec2FeedForward,
Wav2Vec2ForCTC,
Wav2Vec2ForSequenceClassification,
Wav2Vec2GroupNormConvLayer,
Wav2Vec2LayerNormConvLayer,
Wav2Vec2NoLayerNormConvLayer,
Wav2Vec2SamePadLayer,
_compute_mask_indices,
)
from .configuration_sew import SEWConfig
_HIDDEN_STATES_START_POSITION = 1
class SEWNoLayerNormConvLayer(Wav2Vec2NoLayerNormConvLayer):
pass
class SEWLayerNormConvLayer(Wav2Vec2LayerNormConvLayer):
pass
class SEWGroupNormConvLayer(Wav2Vec2GroupNormConvLayer):
pass
class SEWPositionalConvEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
self.conv = nn.Conv1d(
config.hidden_size,
config.hidden_size,
kernel_size=config.num_conv_pos_embeddings,
padding=config.num_conv_pos_embeddings // 2,
groups=config.num_conv_pos_embedding_groups,
stride=config.squeeze_factor,
)
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
if is_deepspeed_zero3_enabled():
import deepspeed
with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
self.conv = weight_norm(self.conv, name="weight", dim=2)
if hasattr(self.conv, "parametrizations"):
weight_g = self.conv.parametrizations.weight.original0
weight_v = self.conv.parametrizations.weight.original1
else:
weight_g = self.conv.weight_g
weight_v = self.conv.weight_v
deepspeed.zero.register_external_parameter(self, weight_v)
deepspeed.zero.register_external_parameter(self, weight_g)
else:
self.conv = weight_norm(self.conv, name="weight", dim=2)
self.padding = SEWSamePadLayer(config.num_conv_pos_embeddings)
self.activation = ACT2FN[config.feat_extract_activation]
def forward(self, hidden_states):
hidden_states = self.conv(hidden_states)
hidden_states = self.padding(hidden_states)
hidden_states = self.activation(hidden_states)
return hidden_states
class SEWSamePadLayer(Wav2Vec2SamePadLayer):
pass
class SEWUpsampling(nn.Module):
def __init__(self, config):
super().__init__()
self.projection = nn.Linear(config.hidden_size, config.hidden_size * config.squeeze_factor)
self.activation = ACT2FN[config.feat_extract_activation]
self.squeeze_factor = config.squeeze_factor
def forward(self, hidden_states):
hidden_states = self.projection(hidden_states)
hidden_states = self.activation(hidden_states)
if self.squeeze_factor > 1:
# transform embedding channels to sequence length
bsz, src_len, src_embed_dim = hidden_states.size()
tgt_len = src_len * self.squeeze_factor
tgt_embed_dim = src_embed_dim // self.squeeze_factor
hidden_states = hidden_states.reshape(bsz, src_len, self.squeeze_factor, tgt_embed_dim)
hidden_states = hidden_states.reshape(bsz, tgt_len, tgt_embed_dim)
return hidden_states
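# Illustrative shape note (added comment, not part of the original file): with squeeze_factor=2 and
# hidden_size=768, the projection output of shape (batch, seq_len, 1536) is first viewed as
# (batch, seq_len, 2, 768) and then flattened to (batch, 2 * seq_len, 768), i.e. the extra embedding
# channels are traded back for a twice-longer sequence.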
class SEWFeatureEncoder(Wav2Vec2FeatureEncoder):
pass
class SEWFeatureExtractor(SEWFeatureEncoder):
def __init__(self, config):
super().__init__(config)
warnings.warn(
f"The class `{self.__class__.__name__}` has been depreciated "
"and will be removed in Transformers v5. "
f"Use `{self.__class__.__bases__[0].__name__}` instead.",
FutureWarning,
)
class SEWAttention(Wav2Vec2Attention):
pass
class SEWFeedForward(Wav2Vec2FeedForward):
pass
class SEWEncoderLayer(Wav2Vec2EncoderLayer):
pass
class SEWEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.pos_conv_embed = SEWPositionalConvEmbedding(config)
self.pool = nn.AvgPool1d(config.squeeze_factor, config.squeeze_factor)
self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout)
self.layers = nn.ModuleList([SEWEncoderLayer(config) for _ in range(config.num_hidden_layers)])
self.upsample = SEWUpsampling(config)
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
if attention_mask is not None:
expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
if self.config._attn_implementation == "flash_attention_2":
# make sure padded tokens output 0
hidden_states[~expand_attention_mask] = 0.0
# 2d mask is passed through the layers
attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
else:
# make sure padded tokens output 0
hidden_states[~expand_attention_mask] = 0.0
input_lengths = (attention_mask.long()).sum(-1)
# apply pooling formula to get real output_lengths
output_lengths = input_lengths // self.config.squeeze_factor
max_encoder_length = hidden_states.shape[1] // self.config.squeeze_factor
attention_ids = (
torch.arange(0, max_encoder_length, device=output_lengths.device)
.view(1, -1)
.expand(output_lengths.shape[0], -1)
)
attention_mask = (attention_ids < output_lengths.view(-1, 1)).long()
# extend attention_mask
attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
attention_mask = attention_mask.expand(
attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
)
n_input_timesteps = hidden_states.shape[1]
hidden_states = hidden_states.transpose(1, 2)
position_embeddings = self.pos_conv_embed(hidden_states)
pooled_hidden_states = self.pool(hidden_states)
min_length = min(position_embeddings.size(-1), pooled_hidden_states.size(-1))
hidden_states = pooled_hidden_states[..., :min_length] + position_embeddings[..., :min_length]
hidden_states = hidden_states.transpose(1, 2)
hidden_states = self.layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)
for layer in self.layers:
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
# add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
dropout_probability = torch.rand([])
skip_the_layer = self.training and dropout_probability < self.config.layerdrop
if not skip_the_layer or synced_gpus:
# under fsdp or deepspeed zero3 all gpus must run in sync
layer_outputs = layer(
hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
)
hidden_states = layer_outputs[0]
if skip_the_layer:
layer_outputs = (None, None)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
hidden_states = self.upsample(hidden_states)
if hidden_states.shape[1] < n_input_timesteps:
hidden_states = nn.functional.pad(hidden_states, (0, 0, 0, n_input_timesteps - hidden_states.shape[1]))
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
)
@auto_docstring
class SEWPreTrainedModel(PreTrainedModel):
config: SEWConfig
base_model_prefix = "sew"
main_input_name = "input_values"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = False # needs a proper look into the mask creation
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, SEWPositionalConvEmbedding):
nn.init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
if is_deepspeed_zero3_enabled():
import deepspeed
if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
nn.init.kaiming_normal_(module.weight.data)
else:
with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
nn.init.kaiming_normal_(module.weight.data)
else:
nn.init.kaiming_normal_(module.weight.data)
if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
module.bias.data.zero_()
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
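# Illustrative example (added comment, not part of the original file): for a single conv layer with
# kernel_size=10 and stride=5, an input of 16000 samples yields (16000 - 10) // 5 + 1 = 3199 frames;
# the loop above simply chains this formula across every entry of `config.conv_kernel`/`config.conv_stride`.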
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
# these two operations make sure that all values before the output length indices are attended to
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
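# Illustrative example (added comment, not part of the original file): if a sample has an output length of 3
# out of feature_vector_length 5, the assignment above first sets index 2 to one ([0, 0, 1, 0, 0]); flipping,
# taking the cumulative sum and flipping back then yields [1, 1, 1, 0, 0], i.e. everything up to and
# including the last valid frame is attended to.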
@auto_docstring
class SEWModel(SEWPreTrainedModel):
def __init__(self, config: SEWConfig):
super().__init__(config)
self.config = config
self.feature_extractor = SEWFeatureEncoder(config)
self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
self.project_features = config.conv_dim[-1] != config.hidden_size
if self.project_features:
self.feature_projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
self.feature_dropout = nn.Dropout(config.feat_proj_dropout)
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = SEWEncoder(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
def _mask_hidden_states(
self,
hidden_states: torch.FloatTensor,
mask_time_indices: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.LongTensor] = None,
):
"""
Masks extracted features along time axis and/or along feature axis according to
[SpecAugment](https://huggingface.co/papers/1904.08779).
"""
# `config.apply_spec_augment` can set masking to False
if not getattr(self.config, "apply_spec_augment", True):
return hidden_states
# generate indices & apply SpecAugment along time axis
batch_size, sequence_length, hidden_size = hidden_states.size()
if mask_time_indices is not None:
# apply SpecAugment along time axis with given mask_time_indices
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
elif self.config.mask_time_prob > 0 and self.training:
mask_time_indices = _compute_mask_indices(
(batch_size, sequence_length),
mask_prob=self.config.mask_time_prob,
mask_length=self.config.mask_time_length,
attention_mask=attention_mask,
min_masks=self.config.mask_time_min_masks,
)
mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
if self.config.mask_feature_prob > 0 and self.training:
# generate indices & apply SpecAugment along feature axis
mask_feature_indices = _compute_mask_indices(
(batch_size, hidden_size),
mask_prob=self.config.mask_feature_prob,
mask_length=self.config.mask_feature_length,
min_masks=self.config.mask_feature_min_masks,
)
mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
hidden_states[mask_feature_indices] = 0
return hidden_states
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
extract_features = self.layer_norm(extract_features)
if self.project_features:
extract_features = self.feature_projection(extract_features)
hidden_states = self.feature_dropout(extract_features)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states,) + encoder_outputs[1:]
return BaseModelOutput(
last_hidden_state=hidden_states,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class SEWForCTC(Wav2Vec2ForCTC):
pass
class SEWForSequenceClassification(Wav2Vec2ForSequenceClassification):
pass
__all__ = ["SEWForCTC", "SEWForSequenceClassification", "SEWModel", "SEWPreTrainedModel"]
| transformers/src/transformers/models/sew/modular_sew.py/0 | {
"file_path": "transformers/src/transformers/models/sew/modular_sew.py",
"repo_id": "transformers",
"token_count": 8233
} | 506 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for SigLIP.
"""
from typing import Optional, Union
from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SiglipProcessor(ProcessorMixin):
r"""
Constructs a Siglip processor which wraps a Siglip image processor and a Siglip tokenizer into a single processor.
[`SiglipProcessor`] offers all the functionalities of [`SiglipImageProcessor`] and [`SiglipTokenizer`]. See the
[`~SiglipProcessor.__call__`] and [`~SiglipProcessor.decode`] for more information.
Args:
image_processor ([`SiglipImageProcessor`]):
The image processor is a required input.
tokenizer ([`SiglipTokenizer`]):
The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = ("SiglipImageProcessor", "SiglipImageProcessorFast")
tokenizer_class = "AutoTokenizer"
def __init__(self, image_processor, tokenizer):
super().__init__(image_processor, tokenizer)
def __call__(
self,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
images: ImageInput = None,
padding: Union[bool, str, PaddingStrategy] = False,
truncation: Union[bool, str, TruncationStrategy] = None,
max_length: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to SiglipTokenizer's [`~SiglipTokenizer.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `images` argument to
SiglipImageProcessor's [`~SiglipImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
of the above two methods for more information.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding
index) among:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
max_length (`int`, *optional*):
Maximum length of the returned list and optionally padding length (see above).
truncation (`bool`, *optional*):
Activates truncation to cut input sequences longer than `max_length` to `max_length`.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
encoding = self.tokenizer(
text, return_tensors=return_tensors, padding=padding, truncation=truncation, max_length=max_length
)
if images is not None:
image_features = self.image_processor(images, return_tensors=return_tensors)
if text is not None and images is not None:
encoding.update(image_features)
return encoding
elif text is not None:
return encoding
else:
return BatchFeature(data=dict(**image_features), tensor_type=return_tensors)
__all__ = ["SiglipProcessor"]
| transformers/src/transformers/models/siglip/processing_siglip.py/0 | {
"file_path": "transformers/src/transformers/models/siglip/processing_siglip.py",
"repo_id": "transformers",
"token_count": 2415
} | 507 |
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import os
import re
import torch
from datasets import load_dataset
from transformers import (
AutoModelForKeypointDetection,
SuperGlueConfig,
SuperGlueForKeypointMatching,
SuperGlueImageProcessor,
)
def prepare_imgs():
dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train")
image1 = dataset[0]["image"]
image2 = dataset[1]["image"]
image3 = dataset[2]["image"]
return [[image1, image2], [image3, image2]]
def verify_model_outputs(model, model_name, device):
images = prepare_imgs()
preprocessor = SuperGlueImageProcessor()
inputs = preprocessor(images=images, return_tensors="pt").to(device)
model.to(device)
with torch.no_grad():
outputs = model(**inputs, output_hidden_states=True, output_attentions=True)
predicted_matches_values = outputs.matches[0, 0, :10]
predicted_matching_scores_values = outputs.matching_scores[0, 0, :10]
predicted_number_of_matches = torch.sum(outputs.matches[0][0] != -1).item()
if "outdoor" in model_name:
expected_max_number_keypoints = 865
expected_matches_shape = torch.Size((len(images), 2, expected_max_number_keypoints))
expected_matching_scores_shape = torch.Size((len(images), 2, expected_max_number_keypoints))
expected_matches_values = torch.tensor(
[125, 630, 137, 138, 136, 143, 135, -1, -1, 153], dtype=torch.int64, device=device
)
expected_matching_scores_values = torch.tensor(
[0.9899, 0.0033, 0.9897, 0.9889, 0.9879, 0.7464, 0.7109, 0, 0, 0.9841], device=device
)
expected_number_of_matches = 281
elif "indoor" in model_name:
expected_max_number_keypoints = 865
expected_matches_shape = torch.Size((len(images), 2, expected_max_number_keypoints))
expected_matching_scores_shape = torch.Size((len(images), 2, expected_max_number_keypoints))
expected_matches_values = torch.tensor(
[125, 144, 137, 138, 136, 155, 135, -1, -1, 153], dtype=torch.int64, device=device
)
expected_matching_scores_values = torch.tensor(
[0.9694, 0.0010, 0.9006, 0.8753, 0.8521, 0.5688, 0.6321, 0.0, 0.0, 0.7235], device=device
)
expected_number_of_matches = 282
assert outputs.matches.shape == expected_matches_shape
assert outputs.matching_scores.shape == expected_matching_scores_shape
assert torch.allclose(predicted_matches_values, expected_matches_values, atol=1e-4)
assert torch.allclose(predicted_matching_scores_values, expected_matching_scores_values, atol=1e-4)
assert predicted_number_of_matches == expected_number_of_matches
ORIGINAL_TO_CONVERTED_KEY_MAPPING = {
r"kenc.encoder.(\d+)": r"keypoint_encoder.encoder.\1.old",
r"gnn.layers.(\d+).attn.proj.0": r"gnn.layers.\1.attention.self.query",
r"gnn.layers.(\d+).attn.proj.1": r"gnn.layers.\1.attention.self.key",
r"gnn.layers.(\d+).attn.proj.2": r"gnn.layers.\1.attention.self.value",
r"gnn.layers.(\d+).attn.merge": r"gnn.layers.\1.attention.output.dense",
r"gnn.layers.(\d+).mlp.0": r"gnn.layers.\1.mlp.0.linear",
r"gnn.layers.(\d+).mlp.1": r"gnn.layers.\1.mlp.0.batch_norm",
r"gnn.layers.(\d+).mlp.3": r"gnn.layers.\1.mlp.1",
r"final_proj": r"final_projection.final_proj",
}
def convert_old_keys_to_new_keys(state_dict_keys: list[str], conversion_mapping=ORIGINAL_TO_CONVERTED_KEY_MAPPING):
"""
This function should be applied only once, on the concatenated keys to efficiently rename using
the key mappings.
"""
output_dict = {}
if state_dict_keys is not None:
old_text = "\n".join(state_dict_keys)
new_text = old_text
for pattern, replacement in conversion_mapping.items():
if replacement is None:
new_text = re.sub(pattern, "", new_text) # an empty line
continue
new_text = re.sub(pattern, replacement, new_text)
output_dict = dict(zip(old_text.split("\n"), new_text.split("\n")))
return output_dict
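# Illustrative example (added comment, not part of the original file): given the mapping above, a raw key
# such as "gnn.layers.0.attn.proj.0.weight" is rewritten to "gnn.layers.0.attention.self.query.weight",
# and "kenc.encoder.3.weight" becomes "keypoint_encoder.encoder.3.old.weight"; the function builds this
# old-name -> new-name dictionary for every key in a single pass over the concatenated key string.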
def replace_state_dict_keys(all_keys, new_keys, original_state_dict):
state_dict = {}
for key in all_keys:
new_key = new_keys[key]
state_dict[new_key] = original_state_dict.pop(key).contiguous().clone()
return state_dict
def convert_state_dict(state_dict, config):
converted_to_final_key_mapping = {}
def convert_conv_to_linear(keys):
for key in keys:
state_dict[key] = state_dict[key].squeeze(-1)
def qkv_permute_weights_and_biases(keys, num_heads=4):
for key in keys:
tensor = state_dict[key]
shape = tensor.shape
dim_out = shape[0]
if len(shape) == 2:
dim_in = shape[1]
tensor = (
tensor.reshape(dim_out // num_heads, num_heads, dim_in).permute(1, 0, 2).reshape(dim_out, dim_in)
)
if len(shape) == 1:
tensor = tensor.reshape(dim_out // num_heads, num_heads).permute(1, 0).reshape(dim_out)
state_dict[key] = tensor
def output_permute_weights(keys, num_heads=4):
for key in keys:
tensor = state_dict[key]
dim_in = tensor.shape[1]
dim_out = tensor.shape[0]
tensor = tensor.reshape(dim_out, dim_in // num_heads, num_heads).permute(0, 2, 1).reshape(dim_out, dim_in)
state_dict[key] = tensor
conv_keys = []
qkv_permute_keys = []
output_permute_keys = []
# Keypoint Encoder
keypoint_encoder_key = "keypoint_encoder.encoder"
for i in range(1, len(config.keypoint_encoder_sizes) + 2):
old_conv_key = f"{keypoint_encoder_key}.{(i - 1) * 3}.old"
new_index = i - 1
new_conv_key = f"{keypoint_encoder_key}.{new_index}."
if i < len(config.keypoint_encoder_sizes) + 1:
new_conv_key = f"{new_conv_key}linear."
converted_to_final_key_mapping[rf"{old_conv_key}\."] = new_conv_key
if i < len(config.keypoint_encoder_sizes) + 1:
old_batch_norm_key = f"{keypoint_encoder_key}.{(i - 1) * 3 + 1}.old"
new_batch_norm_key = f"{keypoint_encoder_key}.{new_index}.batch_norm."
converted_to_final_key_mapping[rf"{old_batch_norm_key}\."] = new_batch_norm_key
conv_keys.append(f"{old_conv_key}.weight")
# Attentional GNN
for i in range(len(config.gnn_layers_types)):
gnn_layer_key = f"gnn.layers.{i}"
## Attention
attention_key = f"{gnn_layer_key}.attention"
conv_keys.extend(
[
f"{attention_key}.self.query.weight",
f"{attention_key}.self.key.weight",
f"{attention_key}.self.value.weight",
f"{attention_key}.output.dense.weight",
]
)
qkv_permute_keys.extend(
[
f"{attention_key}.self.query.weight",
f"{attention_key}.self.key.weight",
f"{attention_key}.self.value.weight",
f"{attention_key}.self.query.bias",
f"{attention_key}.self.key.bias",
f"{attention_key}.self.value.bias",
]
)
output_permute_keys.append(f"{attention_key}.output.dense.weight")
## MLP
conv_keys.extend([f"{gnn_layer_key}.mlp.0.linear.weight", f"{gnn_layer_key}.mlp.1.weight"])
# Final Projection
conv_keys.append("final_projection.final_proj.weight")
convert_conv_to_linear(conv_keys)
qkv_permute_weights_and_biases(qkv_permute_keys)
output_permute_weights(output_permute_keys)
all_keys = list(state_dict.keys())
new_keys = convert_old_keys_to_new_keys(all_keys, converted_to_final_key_mapping)
state_dict = replace_state_dict_keys(all_keys, new_keys, state_dict)
return state_dict
def add_keypoint_detector_state_dict(superglue_state_dict):
keypoint_detector = AutoModelForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
keypoint_detector_state_dict = keypoint_detector.state_dict()
for k, v in keypoint_detector_state_dict.items():
superglue_state_dict[f"keypoint_detector.{k}"] = v
return superglue_state_dict
@torch.no_grad()
def write_model(
model_path,
checkpoint_url,
safe_serialization=True,
push_to_hub=False,
):
os.makedirs(model_path, exist_ok=True)
# ------------------------------------------------------------
# SuperGlue config
# ------------------------------------------------------------
config = SuperGlueConfig(
hidden_size=256,
keypoint_encoder_sizes=[32, 64, 128, 256],
gnn_layers_types=["self", "cross"] * 9,
sinkhorn_iterations=100,
matching_threshold=0.0,
)
config.architectures = ["SuperGlueForKeypointMatching"]
config.save_pretrained(model_path, push_to_hub=push_to_hub)
print("Model config saved successfully...")
# ------------------------------------------------------------
# Convert weights
# ------------------------------------------------------------
print(f"Fetching all parameters from the checkpoint at {checkpoint_url}...")
original_state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)
print("Converting model...")
all_keys = list(original_state_dict.keys())
new_keys = convert_old_keys_to_new_keys(all_keys)
state_dict = replace_state_dict_keys(all_keys, new_keys, original_state_dict)
state_dict = convert_state_dict(state_dict, config)
del original_state_dict
gc.collect()
state_dict = add_keypoint_detector_state_dict(state_dict)
print("Loading the checkpoint in a SuperGlue model...")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
with torch.device(device):
model = SuperGlueForKeypointMatching(config)
model.load_state_dict(state_dict, strict=True)
print("Checkpoint loaded successfully...")
del model.config._name_or_path
print("Saving the model...")
model.save_pretrained(model_path, safe_serialization=safe_serialization)
del state_dict, model
# Safety check: reload the converted model
gc.collect()
print("Reloading the model to check if it's saved correctly.")
model = SuperGlueForKeypointMatching.from_pretrained(model_path)
print("Model reloaded successfully.")
model_name = "superglue"
if "superglue_outdoor.pth" in checkpoint_url:
model_name += "_outdoor"
elif "superglue_indoor.pth" in checkpoint_url:
model_name += "_indoor"
print("Checking the model outputs...")
verify_model_outputs(model, model_name, device)
print("Model outputs verified successfully.")
organization = "magic-leap-community"
if push_to_hub:
print("Pushing model to the hub...")
model.push_to_hub(
repo_id=f"{organization}/{model_name}",
commit_message="Add model",
)
write_image_processor(model_path, model_name, organization, push_to_hub=push_to_hub)
def write_image_processor(save_dir, model_name, organization, push_to_hub=False):
image_processor = SuperGlueImageProcessor()
image_processor.save_pretrained(save_dir)
if push_to_hub:
print("Pushing image processor to the hub...")
image_processor.push_to_hub(
repo_id=f"{organization}/{model_name}",
commit_message="Add image processor",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://raw.githubusercontent.com/magicleap/SuperGluePretrainedNetwork/master/models/weights/superglue_indoor.pth",
type=str,
help="URL of the original SuperGlue checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push model and image preprocessor to the hub",
)
args = parser.parse_args()
write_model(
args.pytorch_dump_folder_path, args.checkpoint_url, safe_serialization=True, push_to_hub=args.push_to_hub
)
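# Example invocation (illustrative, not part of the original script); the output directory is an assumption:
#   python convert_superglue_to_hf.py \
#       --checkpoint_url https://raw.githubusercontent.com/magicleap/SuperGluePretrainedNetwork/master/models/weights/superglue_outdoor.pth \
#       --pytorch_dump_folder_path ./superglue_outdoor_converted \
#       --push_to_hub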
| transformers/src/transformers/models/superglue/convert_superglue_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/superglue/convert_superglue_to_hf.py",
"repo_id": "transformers",
"token_count": 5591
} | 508 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Swin SimMIM checkpoints from the original repository.
URL: https://github.com/microsoft/Swin-Transformer/blob/main/MODELHUB.md#simmim-pretrained-swin-v1-models"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
config = SwinConfig(image_size=192)
if "base" in model_name:
window_size = 6
embed_dim = 128
depths = (2, 2, 18, 2)
num_heads = (4, 8, 16, 32)
elif "large" in model_name:
window_size = 12
embed_dim = 192
depths = (2, 2, 18, 2)
num_heads = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants")
config.window_size = window_size
config.embed_dim = embed_dim
config.depths = depths
config.num_heads = num_heads
return config
def rename_key(name):
if "encoder.mask_token" in name:
name = name.replace("encoder.mask_token", "embeddings.mask_token")
if "encoder.patch_embed.proj" in name:
name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
if "encoder.patch_embed.norm" in name:
name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
if "attn.proj" in name:
name = name.replace("attn.proj", "attention.output.dense")
if "attn" in name:
name = name.replace("attn", "attention.self")
if "norm1" in name:
name = name.replace("norm1", "layernorm_before")
if "norm2" in name:
name = name.replace("norm2", "layernorm_after")
if "mlp.fc1" in name:
name = name.replace("mlp.fc1", "intermediate.dense")
if "mlp.fc2" in name:
name = name.replace("mlp.fc2", "output.dense")
if name == "encoder.norm.weight":
name = "layernorm.weight"
if name == "encoder.norm.bias":
name = "layernorm.bias"
if "decoder" in name:
pass
else:
name = "swin." + name
return name
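# Illustrative example (added comment, not part of the original file): an original key such as
# "encoder.layers.0.blocks.0.attn.proj.weight" is rewritten to
# "swin.encoder.layers.0.blocks.0.attention.output.dense.weight", while keys containing "decoder" are
# kept as-is without the "swin." prefix.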
def convert_state_dict(orig_state_dict, model):
for key in orig_state_dict.copy():
val = orig_state_dict.pop(key)
if "attn_mask" in key:
pass
elif "qkv" in key:
key_split = key.split(".")
layer_num = int(key_split[2])
block_num = int(key_split[4])
dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = (
val[:dim, :]
)
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
dim : dim * 2, :
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = (
val[-dim:, :]
)
else:
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[
:dim
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
dim : dim * 2
]
orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[
-dim:
]
else:
orig_state_dict[rename_key(key)] = val
return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
state_dict = torch.load(checkpoint_path, map_location="cpu", weights_only=True)["model"]
config = get_swin_config(model_name)
model = SwinForMaskedImageModeling(config)
model.eval()
new_state_dict = convert_state_dict(state_dict, model)
model.load_state_dict(new_state_dict)
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
image = Image.open(requests.get(url, stream=True).raw)
inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
outputs = model(**inputs).logits
print(outputs.keys())
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
print(f"Saving image processor to {pytorch_dump_folder_path}")
image_processor.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
print(f"Pushing model and image processor for {model_name} to hub")
model.push_to_hub(f"microsoft/{model_name}")
image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="swin-base-simmim-window6-192",
type=str,
choices=["swin-base-simmim-window6-192", "swin-large-simmim-window12-192"],
help="Name of the Swin SimMIM model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth",
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
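# Example invocation (illustrative, not part of the original script); paths are assumptions:
#   python convert_swin_simmim_to_pytorch.py \
#       --model_name swin-base-simmim-window6-192 \
#       --checkpoint_path ./simmim_pretrain__swin_base__img192_window6__100ep.pth \
#       --pytorch_dump_folder_path ./swin-base-simmim-window6-192 \
#       --push_to_hub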
| transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/swin/convert_swin_simmim_to_pytorch.py",
"repo_id": "transformers",
"token_count": 2902
} | 509 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
"""
Post-process basic Flax/JAX key names and tensors into their PyTorch equivalents.
"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
# linear layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
if "metadata" in layer:
split_layer = layer.split("metadata")
curr_real_layer_name = "".join(split_layer[0])[:-1]
split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
elif "kvstore" in layer:
split_layer = layer.split("kvstore")
curr_real_layer_name = "".join(split_layer[0])[:-1]
split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
else:
split_layer = layer.split("/")
curr_real_layer_name = "/".join(split_layer[:-1])
split_layer[-1] = (split_layer[-1],)
if "kvstore/path" in layer:
content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
elif "kvstore/driver" in layer:
content = "file"
else:
content = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
current_block = rename_keys(current_block)
new_current_block = {}
for k, v in current_block.items():
new_current_block[k.replace("/", ".")] = v
current_block = new_current_block
torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
max_shard_size = convert_file_size_to_int(max_shard_size)
sharded_state_dicts = []
current_block = {}
current_block_size = 0
total_size = 0
os.makedirs(dump_path, exist_ok=True)
with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
checkpoint_info = flatten_dict(checkpoint_info, sep="/")
all_layers = {}
for layer in checkpoint_info:
curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
layer, checkpoint_info, switch_checkpoint_path
)
if curr_real_layer_name in all_layers:
all_layers[curr_real_layer_name][split_layer[-1]] = content
else:
all_layers[curr_real_layer_name] = {split_layer[-1]: content}
for key, layer in all_layers.items():
# open tensorstore file
raw_weights = ts.open(unflatten_dict(layer)).result().read().result()
raw_weights = torch.tensor(raw_weights)
weight_size = raw_weights.numel() * raw_weights.element_size()
# use the renaming pattern from the small conversion scripts
key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
key = "/".join(key)
# If this weight is going to tip up over the maximal size, we split.
if current_block_size + weight_size > max_shard_size:
save_path = os.path.join(
dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
)
rename_and_save_block(current_block, save_path)
sharded_state_dicts.append(current_block.keys())
del current_block
current_block = {}
current_block_size = 0
current_block[key] = raw_weights.to(getattr(torch, dtype))
current_block_size += weight_size
total_size += weight_size
# Add the last block
save_path = os.path.join(
dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
)
rename_and_save_block(current_block, save_path)
sharded_state_dicts.append(current_block.keys())
# If we only have one shard, we return it
if len(sharded_state_dicts) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
weight_map = {}
shards = {}
for idx, shard in enumerate(sharded_state_dicts):
shard_file = weights_name.replace(
".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin"
)
temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
os.rename(temp_filename, os.path.join(dump_path, shard_file))
shards[shard_file] = shard
for key in shard:
weight_map[key] = shard_file
# Add the metadata
metadata = {"total_size": total_size}
index = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
f.write(content)
return metadata, index
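# Minimal usage sketch (paths are placeholders, not real checkpoints):
#   metadata, index = shard_on_the_fly(
#       "/path/to/t5x_checkpoint", "/path/to/dump", "10GB", "bfloat16"
#   )
# Shards are first written with an "-of-???" placeholder suffix and only renamed
# once the total shard count is known, which is why the loop above rewrites the filenames.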
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
args = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer
config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
model = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
)
tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small")
text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
input_ids = tokenizer(text, return_tensors="pt").input_ids
out = model.generate(input_ids, decoder_start_token_id=0)
print(tokenizer.decode(out[0]))
| transformers/src/transformers/models/switch_transformers/convert_big_switch.py/0 | {
"file_path": "transformers/src/transformers/models/switch_transformers/convert_big_switch.py",
"repo_id": "transformers",
"token_count": 3221
} | 510 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/t5gemma/modular_t5gemma.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_t5gemma.py file directly. One of our CI checks enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2025 Google Inc. and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Optional, Union
import torch
import torch.nn as nn
from transformers.utils.generic import OutputRecorder, check_model_inputs
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask, create_sliding_window_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, can_return_tuple, is_torchdynamo_compiling, logging
from ...utils.deprecation import deprecate_kwarg
from .configuration_t5gemma import T5GemmaConfig, T5GemmaModuleConfig
logger = logging.get_logger(__name__)
class T5GemmaRMSNorm(nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.zeros(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float())
# Llama does x.to(float16) * w whilst T5Gemma is (x * w).to(float16)
# See https://github.com/huggingface/transformers/pull/29402
output = output * (1.0 + self.weight.float())
return output.type_as(x)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.eps}"
class T5GemmaMLP(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)
self.act_fn = ACT2FN[config.hidden_activation]
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, x):
hidden_states = self.act_fn(self.gate_proj(x)) * self.up_proj(x)
hidden_states = self.dropout(hidden_states)
down_proj = self.down_proj(hidden_states)
return down_proj
class T5GemmaRotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
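# Shape sketch under the default unsqueeze_dim=1 (sizes are illustrative): with q of
# shape (batch=2, heads=8, seq=16, head_dim=64) and cos/sin of shape (2, 16, 64),
# unsqueeze(1) turns cos/sin into (2, 1, 16, 64), which broadcasts over the head
# dimension in (q * cos) + (rotate_half(q) * sin).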
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
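# Shape sketch (sizes are illustrative): with 2 key/value heads and n_rep=4, a
# (batch, 2, seq, head_dim) tensor expands to (batch, 2, 4, seq, head_dim) and is
# reshaped to (batch, 8, seq, head_dim), duplicating each KV head for the 4 query
# heads that share it (grouped-query attention).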
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
dropout: float = 0.0,
scaling: Optional[float] = None,
softcap: Optional[float] = None,
**kwargs,
) -> tuple[torch.Tensor, torch.Tensor]:
if scaling is None:
scaling = module.head_dim**-0.5
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if softcap is not None:
attn_weights = attn_weights / softcap
attn_weights = torch.tanh(attn_weights)
attn_weights = attn_weights * softcap
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
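# The optional softcap above computes softcap * tanh(scores / softcap): a smooth clamp
# keeping attention logits in (-softcap, softcap) while remaining differentiable. For
# example (values illustrative), a raw score of 100 with softcap=50 maps to about
# 50 * tanh(2) ~= 48.2 instead of 100.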
class T5GemmaSelfAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: T5GemmaModuleConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
# Required by flash attention: encoder self-attention is non-causal
self.is_causal = config.is_decoder
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.attn_logit_softcapping = self.config.attn_logit_softcapping
self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class T5GemmaCrossAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: T5GemmaModuleConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
self.is_causal = False
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.cross_attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.cross_attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.attn_logit_softcapping = self.config.attn_logit_softcapping
if config.cross_attention_hidden_size is None:
raise ValueError("Cross-attention needs cross_attention_hidden_size to be specified.")
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor],
encoder_hidden_states: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
if encoder_hidden_states is None:
raise ValueError("Encoder hidden state is required for cross attention.")
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
if past_key_values is not None:
is_updated = past_key_values.is_updated.get(self.layer_idx)
curr_past_key_value = past_key_values.cross_attention_cache
if past_key_values is None or not is_updated:
encoder_input_shape = encoder_hidden_states.shape[:-1]
encoder_hidden_shape = (*encoder_input_shape, -1, self.head_dim)
key_states = self.k_proj(encoder_hidden_states).view(encoder_hidden_shape).transpose(1, 2)
value_states = self.v_proj(encoder_hidden_states).view(encoder_hidden_shape).transpose(1, 2)
if past_key_values is not None:
key_states, value_states = curr_past_key_value.update(key_states, value_states, self.layer_idx)
past_key_values.is_updated[self.layer_idx] = True
else:
key_states = curr_past_key_value.layers[self.layer_idx].keys
value_states = curr_past_key_value.layers[self.layer_idx].values
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=None,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
class T5GemmaEncoderLayer(GradientCheckpointingLayer):
"""Encoder sub-layer."""
def __init__(self, config, layer_idx: int):
super().__init__()
self.hidden_size = config.hidden_size
self.config = config
self.layer_idx = layer_idx
self.attention_type = config.layer_types[layer_idx]
self.self_attn = T5GemmaSelfAttention(
config=config,
layer_idx=layer_idx,
)
self.pre_self_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_self_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.mlp = T5GemmaMLP(config)
self.pre_feedforward_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_feedforward_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor,]:
residual = hidden_states
hidden_states = self.pre_self_attn_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=None,
**kwargs,
)
hidden_states = self.post_self_attn_layernorm(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.pre_feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
class T5GemmaDecoderLayer(T5GemmaEncoderLayer):
"""Decoder sub-layer: an extra cross-attention layer."""
def __init__(self, config, layer_idx: int):
super().__init__(config, layer_idx)
self.cross_attn = T5GemmaCrossAttention(config=config, layer_idx=layer_idx)
self.pre_cross_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_cross_attn_layernorm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.pre_self_attn_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values.self_attention_cache if past_key_values is not None else None,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.post_self_attn_layernorm(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.pre_cross_attn_layernorm(hidden_states)
hidden_states, _ = self.cross_attn(
hidden_states=hidden_states,
encoder_hidden_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
**kwargs,
)
hidden_states = self.post_cross_attn_layernorm(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
residual = hidden_states
hidden_states = self.pre_feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = self.post_feedforward_layernorm(hidden_states)
hidden_states = residual + self.dropout(hidden_states)
return hidden_states
class T5GemmaClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(self, hidden_size: int, num_labels: int, classifier_dropout_rate: float = 0.0):
super().__init__()
self.dropout = nn.Dropout(p=classifier_dropout_rate)
self.out_proj = nn.Linear(hidden_size, num_labels)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class T5GemmaLMHead(nn.Module):
"""Head for language modeling (generation) tasks."""
def __init__(self, hidden_size: int, vocab_size: int, bias: bool = False):
super().__init__()
self.out_proj = nn.Linear(hidden_size, vocab_size, bias=bias)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
logits = self.out_proj(hidden_states)
return logits
class T5GemmaAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: T5GemmaConfig, layer_idx: int):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.scaling = config.query_pre_attn_scalar**-0.5
self.attention_dropout = self.config.attention_dropout
self.is_causal = True
self.q_proj = nn.Linear(
config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
)
self.k_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.v_proj = nn.Linear(
config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
)
self.o_proj = nn.Linear(
config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
)
self.attn_logit_softcapping = self.config.attn_logit_softcapping
self.sliding_window = config.sliding_window if config.layer_types[layer_idx] == "sliding_attention" else None
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: tuple[torch.Tensor, torch.Tensor],
attention_mask: Optional[torch.Tensor],
past_key_values: Optional[Cache] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=self.attention_dropout if self.training else 0.0,
scaling=self.scaling,
sliding_window=self.sliding_window,
softcap=self.attn_logit_softcapping,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
@auto_docstring
class T5GemmaPreTrainedModel(PreTrainedModel):
config: T5GemmaConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["T5GemmaBlock"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": T5GemmaDecoderLayer,
"attentions": T5GemmaAttention,
}
def _init_weights(self, module):
# TODO: support initialization for encoders and decoders separately(?)
super()._init_weights(module)
std = self.config.initializer_range
if isinstance(module, T5GemmaClassificationHead):
scale = module.out_proj.weight.shape[0] ** -0.5
module.out_proj.weight.data.normal_(mean=0.0, std=std * scale)
if hasattr(module.out_proj, "bias") and module.out_proj.bias is not None:
module.out_proj.bias.data.zero_()
elif isinstance(module, T5GemmaLMHead):
if not self.config.tie_word_embeddings:
scale = module.out_proj.weight.shape[0] ** -0.5
module.out_proj.weight.data.normal_(mean=0.0, std=std * scale)
def _shift_right(self, input_ids):
"""
Shifts input_ids to the right, prepends the decoder_start_token_id, and handles
pad_token_id replacement for labels that were -100.
This is a common preparation step for decoder inputs in sequence-to-sequence models.
"""
decoder_start_token_id = self.config.decoder.bos_token_id
pad_token_id = self.config.decoder.pad_token_id
if decoder_start_token_id is None:
raise ValueError("self.model.config.decoder.bos_token_id has to be defined. ")
# shift inputs to the right
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
shifted_input_ids[..., 0] = decoder_start_token_id
if pad_token_id is None:
raise ValueError("self.model.config.decoder.pad_token_id has to be defined.")
# As in T5, replace possible -100 values in labels with the `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
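# Illustrative sketch (token ids are made up): labels [42, 7, -100] with
# decoder_start_token_id=2 shift to [2, 42, 7]; a row [42, -100, -100] shifts to
# [2, 42, -100] and the masked_fill_ with pad_token_id=0 then yields [2, 42, 0],
# so no -100 sentinel ever reaches the embedding layer.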
def bidirectional_mask_function(attention_mask: Optional[torch.Tensor]) -> Callable:
"""
This creates a bidirectional (non-causal) attention mask.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
if attention_mask is None:
return torch.ones((), dtype=torch.bool)
return attention_mask[batch_idx, kv_idx].to(torch.bool)
return inner_mask
def sliding_window_bidirectional_mask_function(sliding_window: int) -> Callable:
"""
This creates a bidirectional attention mask with a sliding window.
"""
def inner_mask(batch_idx: int, head_idx: int, q_idx: int, kv_idx: int) -> bool:
return (q_idx - sliding_window < kv_idx) & (kv_idx < q_idx + sliding_window)
return inner_mask
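# Example (window size assumed to be 3): query position q_idx=5 may attend to kv_idx
# in {3, 4, 5, 6, 7}, since 5 - 3 < kv_idx and kv_idx < 5 + 3. The window is symmetric
# around the query, which is what makes this mask bidirectional rather than the causal
# sliding window used on the decoder side.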
def make_default_2d_attention_mask(
token_ids: Optional[torch.LongTensor],
hidden_states: torch.Tensor,
pad_token_id: Optional[int],
) -> torch.Tensor:
"""Construct the default attention mask."""
if token_ids is not None:
if pad_token_id is None:
raise ValueError("`pad_token_id` is required for padding information.")
attention_mask = (token_ids != pad_token_id).to(hidden_states.device, torch.long)
else:
attention_mask = torch.ones(
(hidden_states.shape[0], hidden_states.shape[1]), device=hidden_states.device, dtype=torch.long
)
return attention_mask
class T5GemmaEncoder(T5GemmaPreTrainedModel):
_can_record_outputs = {
"attentions": T5GemmaSelfAttention,
"hidden_states": T5GemmaEncoderLayer,
}
def __init__(self, config):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.norm = T5GemmaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.rotary_emb = T5GemmaRotaryEmbedding(config=config)
self.gradient_checkpointing = False
self.layers = nn.ModuleList(
[T5GemmaEncoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.dropout = nn.Dropout(config.dropout_rate)
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
# As we want to pass `past_key_values=None` explicitly everywhere, we need to pop it from kwargs if present
kwargs.pop("past_key_values", None)
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
cache_position = torch.arange(0, inputs_embeds.shape[1], device=inputs_embeds.device)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
if attention_mask is None:
attention_mask = make_default_2d_attention_mask(input_ids, inputs_embeds, self.config.pad_token_id)
if not isinstance(self_attn_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": None,
"position_ids": position_ids,
}
self_attn_mask_mapping = {
"full_attention": create_causal_mask(
**mask_kwargs,
or_mask_function=bidirectional_mask_function(attention_mask),
),
"sliding_attention": create_sliding_window_causal_mask(
**mask_kwargs,
or_mask_function=sliding_window_bidirectional_mask_function(self.config.sliding_window),
and_mask_function=bidirectional_mask_function(attention_mask),
),
}
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
hidden_states = hidden_states * normalizer
hidden_states = self.dropout(hidden_states)
for layer_module in self.layers[: self.config.num_hidden_layers]:
hidden_states = layer_module(
hidden_states,
position_embeddings,
self_attn_mask_mapping[layer_module.attention_type],
position_ids,
**kwargs,
)
hidden_states = self.norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return BaseModelOutput(
last_hidden_state=hidden_states,
)
class T5GemmaDecoder(T5GemmaEncoder):
_can_record_outputs = {
"attentions": OutputRecorder(T5GemmaSelfAttention, index=1),
"cross_attentions": OutputRecorder(T5GemmaCrossAttention, index=1),
"hidden_states": T5GemmaDecoderLayer,
}
def __init__(self, config):
super().__init__(config)
self.layers = nn.ModuleList(
[T5GemmaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.post_init()
@check_model_inputs
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPastAndCrossAttentions:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states` must be given in decoder")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if not self.training and use_cache and past_key_values is None:
past_key_values = EncoderDecoderCache(DynamicCache(config=self.config), DynamicCache(config=self.config))
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
if attention_mask is None and past_key_values is None:
attention_mask = make_default_2d_attention_mask(input_ids, inputs_embeds, self.config.pad_token_id)
if not isinstance(self_attn_mask_mapping := attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": inputs_embeds,
"attention_mask": attention_mask,
"cache_position": cache_position,
"past_key_values": past_key_values.self_attention_cache if past_key_values is not None else None,
"position_ids": position_ids,
}
self_attn_mask_mapping = {
"full_attention": create_causal_mask(**mask_kwargs),
"sliding_attention": create_sliding_window_causal_mask(**mask_kwargs),
}
if not isinstance(cross_attn_mask_mapping := encoder_attention_mask, dict):
mask_kwargs = {
"config": self.config,
"input_embeds": encoder_hidden_states,
"attention_mask": encoder_attention_mask,
"cache_position": cache_position,
"past_key_values": None,
"position_ids": None,
}
cross_attn_mask_mapping = {
"full_attention": create_causal_mask(
**mask_kwargs,
or_mask_function=bidirectional_mask_function(encoder_attention_mask),
),
}
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids)
normalizer = torch.tensor(self.config.hidden_size**0.5, dtype=hidden_states.dtype)
hidden_states = hidden_states * normalizer
hidden_states = self.dropout(hidden_states)
for layer_module in self.layers[: self.config.num_hidden_layers]:
hidden_states = layer_module(
hidden_states,
position_embeddings,
self_attn_mask_mapping[layer_module.attention_type],
position_ids,
past_key_values,
use_cache,
cache_position,
encoder_hidden_states,
cross_attn_mask_mapping["full_attention"],
**kwargs,
)
hidden_states = self.norm(hidden_states)
hidden_states = self.dropout(hidden_states)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
class T5GemmaModel(T5GemmaPreTrainedModel):
def __init__(self, config: T5GemmaConfig):
super().__init__(config)
if not config.is_encoder_decoder:
raise ValueError("T5GemmaModel only support encoder-decoder modeling. Use `T5GemmaEncoderModel` instead.")
self.encoder = T5GemmaEncoder(config.encoder)
self.decoder = T5GemmaDecoder(config.decoder)
self.post_init()
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
return self.encoder.set_input_embeddings(new_embeddings)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
decoder_position_ids: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[BaseModelOutput] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
inputs_embeds: Optional[torch.Tensor] = None,
decoder_inputs_embeds: Optional[torch.Tensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Seq2SeqModelOutput:
r"""
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
"""
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
**kwargs,
)
encoder_hidden_states = encoder_outputs.last_hidden_state
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=attention_mask,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states
if kwargs.get("output_hidden_states", False)
else (decoder_outputs.last_hidden_state,),
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
@auto_docstring
class T5GemmaEncoderModel(T5GemmaPreTrainedModel):
def __init__(self, config: T5GemmaConfig):
super().__init__(config)
if config.is_encoder_decoder:
raise ValueError("T5GemmaEncoderModel only supports encoder-only model. Use `T5GemmaModel` instead.")
self.encoder = T5GemmaEncoder(config.encoder)
self.post_init()
def get_input_embeddings(self):
return self.encoder.get_input_embeddings()
def set_input_embeddings(self, new_embeddings):
return self.encoder.set_input_embeddings(new_embeddings)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutput:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
**kwargs,
)
return encoder_outputs
class T5GemmaForConditionalGeneration(T5GemmaPreTrainedModel, GenerationMixin):
_tied_weights_keys = ["model.decoder.embed_tokens.weight", "lm_head.out_proj.weight"]
_tp_plan = {"lm_head.out_proj": "colwise_rep"}
_pp_plan = {"lm_head.out_proj": (["hidden_states"], ["logits"])}
def __init__(self, config: T5GemmaConfig):
config.is_encoder_decoder = True
super().__init__(config)
self.model = T5GemmaModel(config)
self.vocab_size = config.decoder.vocab_size
self.lm_head = T5GemmaLMHead(config.decoder.hidden_size, self.vocab_size)
self.loss_type = "ForMaskedLM"
self.post_init()
def set_output_embeddings(self, new_embeddings):
self.lm_head.out_proj = new_embeddings
def get_output_embeddings(self):
return self.lm_head.out_proj
def _tie_weights(self):
# Decoder input and output embeddings are tied.
if self.config.tie_word_embeddings:
self._tie_or_clone_weights(self.lm_head.out_proj, self.get_decoder().get_input_embeddings())
def get_encoder(self):
return self.model.encoder
def get_decoder(self):
return self.model.decoder
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
decoder_position_ids: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[BaseModelOutput] = None,
past_key_values: Optional[EncoderDecoderCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.FloatTensor], Seq2SeqLMOutput]:
r"""
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
if self.training and self.config._attn_implementation != "eager":
msg = (
"It is strongly recommended to train T5Gemma models with the `eager` attention implementation "
f"instead of `{self.config._attn_implementation}`. Use `eager` with `AutoModelForCausalLM.from_pretrained('<path-to-checkpoint>', attn_implementation='eager')`."
)
if is_torchdynamo_compiling():
raise ValueError(msg)
else:
logger.warning_once(msg)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
decoder_outputs: Seq2SeqModelOutput = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_outputs=encoder_outputs,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = decoder_outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
decoder_config = self.get_decoder().config
if decoder_config.final_logit_softcapping is not None:
logits = logits / decoder_config.final_logit_softcapping
logits = torch.tanh(logits)
logits = logits * decoder_config.final_logit_softcapping
loss = None
if labels is not None:
# Input has right-shifted so we directly perform masked lm loss
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
return Seq2SeqLMOutput(
loss=loss,
logits=logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.decoder_hidden_states,
decoder_attentions=decoder_outputs.decoder_attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=decoder_outputs.encoder_last_hidden_state,
encoder_hidden_states=decoder_outputs.encoder_hidden_states,
encoder_attentions=decoder_outputs.encoder_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
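# Minimal generation sketch (the checkpoint name is a placeholder, not a verified hub id):
#   model = T5GemmaForConditionalGeneration.from_pretrained("<path-or-repo>")
#   out = model.generate(input_ids, max_new_tokens=20)
# During training, labels shifted right via prepare_decoder_input_ids_from_labels()
# feed the decoder, mirroring the _shift_right() helper defined above.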
@auto_docstring
class T5GemmaForSequenceClassification(T5GemmaPreTrainedModel):
def __init__(self, config: T5GemmaConfig, is_encoder_decoder: Optional[bool] = None):
r"""
is_encoder_decoder (`bool`, *optional*):
Whether to use the encoder-decoder architecture for sequence classification. When set to False, only the encoder is used.
"""
if is_encoder_decoder is not None:
config.is_encoder_decoder = is_encoder_decoder
super().__init__(config)
self.num_labels = config.num_labels
if config.is_encoder_decoder:
self.model = T5GemmaModel(config)
else:
self.model = T5GemmaEncoderModel(config)
hidden_size = config.encoder.hidden_size
if config.is_encoder_decoder:
hidden_size = config.decoder.hidden_size
classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1)
self.score = T5GemmaClassificationHead(hidden_size, self.num_labels, classifier_dropout)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
decoder_position_ids: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[BaseModelOutput] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> SequenceClassifierOutput:
r"""
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
if self.config.is_encoder_decoder and (input_ids is None and inputs_embeds is not None):
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__} in encoder-decoder mode."
)
# Following T5, we automatically create decoder_input_ids from input_ids if no decoder_input_ids are provided
if self.config.is_encoder_decoder and (decoder_input_ids is None and decoder_inputs_embeds is None):
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = self._shift_right(input_ids)
if self.config.is_encoder_decoder:
outputs: Seq2SeqModelOutput = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=False,
**kwargs,
)
last_hidden_state = outputs.last_hidden_state
hidden_states = outputs.decoder_hidden_states
attentions = outputs.decoder_attentions
else:
outputs: BaseModelOutput = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
**kwargs,
)
last_hidden_state = outputs.last_hidden_state
hidden_states = outputs.hidden_states
attentions = outputs.attentions
logits = self.score(last_hidden_state)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
if self.config.is_encoder_decoder:
last_non_pad_token += 1 # due to the right shift.
last_non_pad_token = torch.clamp(last_non_pad_token, max=decoder_input_ids.shape[-1] - 1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
return SequenceClassifierOutput(
loss=loss,
logits=pooled_logits,
hidden_states=hidden_states,
attentions=attentions,
)
@auto_docstring
class T5GemmaForTokenClassification(T5GemmaPreTrainedModel):
def __init__(self, config: T5GemmaConfig, is_encoder_decoder: Optional[bool] = None):
r"""
is_encoder_decoder (`bool`, *optional*):
Whether to use the encoder-decoder architecture for token classification. When set to False, only the encoder is used.
"""
if is_encoder_decoder is not None:
config.is_encoder_decoder = is_encoder_decoder
super().__init__(config)
self.num_labels = config.num_labels
if config.is_encoder_decoder:
self.model = T5GemmaModel(config)
else:
self.model = T5GemmaEncoderModel(config)
hidden_size = config.encoder.hidden_size
if config.is_encoder_decoder:
hidden_size = config.decoder.hidden_size
classifier_dropout = getattr(config, "classifier_dropout_rate", 0.1)
self.score = T5GemmaClassificationHead(hidden_size, self.num_labels, classifier_dropout)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.Tensor] = None,
decoder_position_ids: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[BaseModelOutput] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> TokenClassifierOutput:
r"""
decoder_position_ids (`torch.LongTensor` of shape `(batch_size, decoder_sequence_length)`, *optional*):
Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0,
config.decoder.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
if self.config.is_encoder_decoder and (input_ids is None and inputs_embeds is not None):
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__} in encoder-decoder mode."
)
if self.config.is_encoder_decoder and (decoder_input_ids is None and decoder_inputs_embeds is None):
if input_ids is None:
raise ValueError(
"If no `decoder_input_ids` or `decoder_inputs_embeds` are "
"passed, `input_ids` cannot be `None`. Please pass either "
"`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`."
)
decoder_input_ids = self._shift_right(input_ids)
if self.config.is_encoder_decoder:
outputs: Seq2SeqModelOutput = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_outputs=encoder_outputs,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=False,
**kwargs,
)
last_hidden_state = outputs.last_hidden_state
hidden_states = outputs.decoder_hidden_states
attentions = outputs.decoder_attentions
else:
outputs: BaseModelOutput = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
**kwargs,
)
last_hidden_state = outputs.last_hidden_state
hidden_states = outputs.hidden_states
attentions = outputs.attentions
logits = self.score(last_hidden_state)
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.config)
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=hidden_states,
attentions=attentions,
)
__all__ = [
"T5GemmaForConditionalGeneration",
"T5GemmaModel",
"T5GemmaEncoderModel",
"T5GemmaPreTrainedModel",
"T5GemmaForSequenceClassification",
"T5GemmaForTokenClassification",
]
| transformers/src/transformers/models/t5gemma/modeling_t5gemma.py/0 | {
"file_path": "transformers/src/transformers/models/t5gemma/modeling_t5gemma.py",
"repo_id": "transformers",
"token_count": 27322
} | 511 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuration for Backbone models"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration for a timm backbone [`TimmBackbone`].
It is used to instantiate a timm backbone model according to the specified arguments, defining the model architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
backbone (`str`, *optional*):
The timm checkpoint to load.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
features_only (`bool`, *optional*, defaults to `True`):
Whether to output only the features or also the logits.
use_pretrained_backbone (`bool`, *optional*, defaults to `True`):
Whether to use a pretrained backbone.
out_indices (`list[int]`, *optional*):
If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
many stages the model has). Will default to the last stage if unset.
freeze_batch_norm_2d (`bool`, *optional*, defaults to `False`):
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`.
Example:
```python
>>> from transformers import TimmBackboneConfig, TimmBackbone
>>> # Initializing a timm backbone
>>> configuration = TimmBackboneConfig("resnet50")
>>> # Initializing a model from the configuration
>>> model = TimmBackbone(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```
"""
model_type = "timm_backbone"
def __init__(
self,
backbone=None,
num_channels=3,
features_only=True,
use_pretrained_backbone=True,
out_indices=None,
freeze_batch_norm_2d=False,
**kwargs,
):
super().__init__(**kwargs)
self.backbone = backbone
self.num_channels = num_channels
self.features_only = features_only
self.use_pretrained_backbone = use_pretrained_backbone
self.use_timm_backbone = True
self.out_indices = out_indices if out_indices is not None else [-1]
self.freeze_batch_norm_2d = freeze_batch_norm_2d
__all__ = ["TimmBackboneConfig"]
| transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py/0 | {
"file_path": "transformers/src/transformers/models/timm_backbone/configuration_timm_backbone.py",
"repo_id": "transformers",
"token_count": 1119
} | 512 |
# coding=utf-8
# Copyright 2023 The Intel AIA Team Authors, and HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for TVP.
"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class TvpProcessor(ProcessorMixin):
r"""
    Constructs a TVP processor which wraps a TVP image processor and a Bert tokenizer into a single processor.
[`TvpProcessor`] offers all the functionalities of [`TvpImageProcessor`] and [`BertTokenizerFast`]. See the
[`~TvpProcessor.__call__`] and [`~TvpProcessor.decode`] for more information.
Args:
image_processor ([`TvpImageProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`BertTokenizerFast`], *optional*):
The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "TvpImageProcessor"
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(image_processor, tokenizer)
def __call__(self, text=None, videos=None, return_tensors=None, **kwargs):
"""
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
the text. To prepare the image(s), this method forwards the `videos` and `kwargs` arguments to
TvpImageProcessor's [`~TvpImageProcessor.__call__`] if `videos` is not `None`. Please refer to the docstring of
the above two methods for more information.
Args:
text (`str`, `list[str]`, `list[list[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            videos (`list[PIL.Image.Image]`, `list[np.ndarray]`, `list[torch.Tensor]`, `list[list[PIL.Image.Image]]`, `list[list[np.ndarray]]`, `list[list[torch.Tensor]]`):
                The video or batch of videos to be prepared. Each video should be a list of frames, which can be
                either PIL images or NumPy arrays. In the case of NumPy arrays/PyTorch tensors, each frame should
                be of shape (H, W, C), where H and W are the frame height and width, and C is the number of
                channels.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `videos` is not `None`.
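        Example (a minimal sketch; the checkpoint name `Intel/tvp-base`, the prompt and the random frames are
        illustrative placeholders rather than required values):
        ```python
        >>> import numpy as np
        >>> from transformers import AutoProcessor
        >>> processor = AutoProcessor.from_pretrained("Intel/tvp-base")
        >>> video = [np.random.randint(0, 255, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 dummy frames
        >>> inputs = processor(text=["a person opens the door"], videos=video, return_tensors="pt")
        >>> # `inputs` now holds `input_ids`, `attention_mask` and `pixel_values`
        ```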
"""
max_text_length = kwargs.pop("max_text_length", None)
if text is None and videos is None:
raise ValueError("You have to specify either text or videos. Both cannot be none.")
encoding = {}
if text is not None:
textual_input = self.tokenizer.batch_encode_plus(
text,
truncation=True,
padding="max_length",
max_length=max_text_length,
pad_to_max_length=True,
return_tensors=return_tensors,
return_token_type_ids=False,
**kwargs,
)
encoding.update(textual_input)
if videos is not None:
image_features = self.image_processor(videos, return_tensors=return_tensors, **kwargs)
encoding.update(image_features)
return BatchEncoding(data=encoding, tensor_type=return_tensors)
def post_process_video_grounding(self, logits, video_durations):
"""
        Compute the start and end times of the video segment predicted for the grounding query.
Args:
logits (`torch.Tensor`):
The logits output of TvpForVideoGrounding.
video_durations (`float`):
The video's duration.
Returns:
            start (`float`):
                The predicted start time of the grounded segment.
            end (`float`):
                The predicted end time of the grounded segment.
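        Example (a minimal sketch with illustrative values: `processor` is assumed to be a `TvpProcessor`
        instance and `30.0` is the clip duration supplied by the caller):
        ```python
        >>> import torch
        >>> logits = torch.tensor([[0.25, 0.60]])  # normalized (start, end) predictions in [0, 1]
        >>> processor.post_process_video_grounding(logits, 30.0)
        (7.5, 18.0)
        ```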
"""
start, end = (
round(logits.tolist()[0][0] * video_durations, 1),
round(logits.tolist()[0][1] * video_durations, 1),
)
return start, end
__all__ = ["TvpProcessor"]
| transformers/src/transformers/models/tvp/processing_tvp.py/0 | {
"file_path": "transformers/src/transformers/models/tvp/processing_tvp.py",
"repo_id": "transformers",
"token_count": 2474
} | 513 |
# coding=utf-8
# Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch UniSpeech model."""
import math
import warnings
from dataclasses import dataclass
from typing import Optional, Union
import torch
import torch.nn as nn
from ...modeling_outputs import ModelOutput, Wav2Vec2BaseModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring, logging
from ..wav2vec2.modeling_wav2vec2 import (
Wav2Vec2Encoder,
Wav2Vec2EncoderStableLayerNorm,
Wav2Vec2FeatureEncoder,
Wav2Vec2FeatureProjection,
Wav2Vec2ForCTC,
Wav2Vec2ForSequenceClassification,
Wav2Vec2GumbelVectorQuantizer,
Wav2Vec2Model,
Wav2Vec2PositionalConvEmbedding,
)
from .configuration_unispeech import UniSpeechConfig
logger = logging.get_logger(__name__)
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`UniSpeechForPreTrainingOutput`], with potential hidden states and attentions.
"""
)
class UniSpeechForPreTrainingOutput(ModelOutput):
r"""
loss (*optional*, returned when model is in train mode, `torch.FloatTensor` of shape `(1,)`):
        Total loss as the sum of the contrastive loss (L_m) and the diversity loss (L_d) as stated in the
        [official paper](https://arxiv.org/pdf/2006.11477.pdf).
projected_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Hidden-states of the model projected to *config.proj_codevector_dim* that can be used to predict the masked
projected quantized states.
projected_quantized_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.proj_codevector_dim)`):
Quantized extracted feature vectors projected to *config.proj_codevector_dim* representing the positive
target vectors for contrastive loss.
codevector_perplexity (`torch.FloatTensor` of shape `(1,)`):
The perplexity of the codevector distribution, used to measure the diversity of the codebook.
"""
loss: Optional[torch.FloatTensor] = None
projected_states: Optional[torch.FloatTensor] = None
projected_quantized_states: Optional[torch.FloatTensor] = None
codevector_perplexity: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
class UniSpeechPositionalConvEmbedding(Wav2Vec2PositionalConvEmbedding):
pass
class UniSpeechFeatureEncoder(Wav2Vec2FeatureEncoder):
pass
class UniSpeechFeatureProjection(Wav2Vec2FeatureProjection):
pass
class UniSpeechEncoder(Wav2Vec2Encoder):
pass
class UniSpeechEncoderStableLayerNorm(Wav2Vec2EncoderStableLayerNorm):
pass
class UniSpeechGumbelVectorQuantizer(Wav2Vec2GumbelVectorQuantizer):
@staticmethod
def _compute_perplexity(probs):
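        # Perplexity of the batch-averaged code distribution: exp of its entropy, summed over the codebook
        # groups. It is largest when all codevectors are used uniformly, so it serves as a diversity measure.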
marginal_probs = probs.mean(dim=0)
perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum()
return perplexity
def forward(self, hidden_states):
batch_size, sequence_length, hidden_size = hidden_states.shape
# project to codevector dim
hidden_states = self.weight_proj(hidden_states)
hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1)
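        # shape: (batch_size * sequence_length * num_groups, num_vars), i.e. one categorical distribution
        # over codevectors per group and per time step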
if self.training:
            # sample codevector probs via gumbel-softmax in a differentiable way
codevector_probs = nn.functional.gumbel_softmax(
hidden_states.float(), tau=self.temperature, hard=True
).type_as(hidden_states)
# compute perplexity
codevector_soft_dist = torch.softmax(
hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1
)
perplexity = self._compute_perplexity(codevector_soft_dist)
else:
# take argmax in non-differentiable way
            # compute hard codevector distribution (one-hot)
codevector_idx = hidden_states.argmax(dim=-1)
codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_(
-1, codevector_idx.view(-1, 1), 1.0
)
codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1)
perplexity = self._compute_perplexity(codevector_probs)
codevector_probs = codevector_probs.view(batch_size * sequence_length, -1)
# use probs to retrieve codevectors
codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors
codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1)
codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1)
return codevectors, perplexity
@auto_docstring
class UniSpeechPreTrainedModel(PreTrainedModel):
config: UniSpeechConfig
base_model_prefix = "unispeech"
main_input_name = "input_values"
supports_gradient_checkpointing = True
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
def _init_weights(self, module):
"""Initialize the weights"""
# gumbel softmax requires special init
if isinstance(module, UniSpeechGumbelVectorQuantizer):
module.weight_proj.weight.data.normal_(mean=0.0, std=1)
module.weight_proj.bias.data.zero_()
nn.init.uniform_(module.codevectors)
elif isinstance(module, UniSpeechPositionalConvEmbedding):
nn.init.normal_(
module.conv.weight,
mean=0,
std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)),
)
nn.init.constant_(module.conv.bias, 0)
elif isinstance(module, UniSpeechFeatureProjection):
k = math.sqrt(1 / module.projection.in_features)
nn.init.uniform_(module.projection.weight, a=-k, b=k)
nn.init.uniform_(module.projection.bias, a=-k, b=k)
elif isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
elif isinstance(module, nn.Conv1d):
nn.init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
nn.init.uniform_(module.bias, a=-k, b=k)
def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1
for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
input_lengths = _conv_out_length(input_lengths, kernel_size, stride)
return input_lengths
def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
# Effectively attention_mask.sum(-1), but not inplace to be able to run
# on inference mode.
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1]
output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths).to(torch.long)
batch_size = attention_mask.shape[0]
attention_mask = torch.zeros(
(batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
)
        # these two operations make sure that all positions up to and including the last valid output index are
        # attended to: set a 1 at the last valid index, then flip-cumsum-flip to fill every earlier position
attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
return attention_mask
UniSpeechBaseModelOutput = Wav2Vec2BaseModelOutput
class UniSpeechModel(UniSpeechPreTrainedModel, Wav2Vec2Model):
def __init__(self, config: UniSpeechConfig):
        UniSpeechPreTrainedModel.__init__(self, config)
self.config = config
self.feature_extractor = UniSpeechFeatureEncoder(config)
self.feature_projection = UniSpeechFeatureProjection(config)
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
if config.do_stable_layer_norm:
self.encoder = UniSpeechEncoderStableLayerNorm(config)
else:
self.encoder = UniSpeechEncoder(config)
# Initialize weights and apply final processing
self.post_init()
def freeze_feature_extractor(self):
raise AttributeError("Not needed for UniSpeech")
def freeze_feature_encoder(self):
raise AttributeError("Not needed for UniSpeech")
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
mask_time_indices: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, UniSpeechBaseModelOutput]:
r"""
mask_time_indices (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
Indices to mask extracted features for contrastive loss. When in training mode, model learns to predict
masked extracted features in *config.proj_codevector_dim* space.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
extract_features = self.feature_extractor(input_values)
extract_features = extract_features.transpose(1, 2)
if attention_mask is not None:
# compute reduced attention_mask corresponding to feature vectors
attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)
hidden_states, extract_features = self.feature_projection(extract_features)
hidden_states = self._mask_hidden_states(
hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask
)
encoder_outputs = self.encoder(
hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if not return_dict:
return (hidden_states, extract_features) + encoder_outputs[1:]
return UniSpeechBaseModelOutput(
last_hidden_state=hidden_states,
extract_features=extract_features,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
@auto_docstring(
custom_intro="""
UniSpeech Model with a vector-quantization module and ctc loss for pre-training.
"""
)
class UniSpeechForPreTraining(UniSpeechPreTrainedModel):
def __init__(self, config: UniSpeechConfig):
super().__init__(config)
self.unispeech = UniSpeechModel(config)
self.dropout_features = nn.Dropout(config.feat_quantizer_dropout)
self.quantizer = UniSpeechGumbelVectorQuantizer(config)
self.project_q = nn.Linear(config.codevector_dim, config.proj_codevector_dim)
self.project_hid = nn.Linear(config.proj_codevector_dim, config.hidden_size)
self.ctc_proj = nn.Linear(config.hidden_size, config.num_ctc_classes)
self.dropout = nn.Dropout(config.final_dropout)
# Initialize weights and apply final processing
self.post_init()
def set_gumbel_temperature(self, temperature: int):
"""
Set the Gumbel softmax temperature to a given value. Only necessary for training
"""
self.quantizer.temperature = temperature
def freeze_feature_extractor(self):
"""
Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
warnings.warn(
"The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
"Please use the equivalent `freeze_feature_encoder` method instead.",
FutureWarning,
)
self.freeze_feature_encoder()
def freeze_feature_encoder(self):
"""
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
not be updated during training.
"""
self.unispeech.feature_extractor._freeze_parameters()
@staticmethod
def compute_contrastive_logits(
target_features: torch.FloatTensor,
negative_features: torch.FloatTensor,
predicted_features: torch.FloatTensor,
temperature: int = 1,
):
"""
        Compute logits for contrastive loss using cosine similarity as the distance measure between
        `[positive_feature, negative_features]` and `[predicted_features]`. Additionally, a temperature can be applied.
"""
target_features = torch.cat([target_features, negative_features], dim=0)
logits = torch.cosine_similarity(predicted_features.float(), target_features.float(), dim=-1)
logits = logits.type_as(target_features)
# apply temperature
logits = logits / temperature
return logits
@auto_docstring
def forward(
self,
input_values: Optional[torch.Tensor],
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, UniSpeechForPreTrainingOutput]:
r"""
Example:
```python
>>> import torch
>>> from transformers import AutoFeatureExtractor, UniSpeechForPreTraining
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-large-1500h-cv")
>>> model = UniSpeechForPreTraining.from_pretrained("microsoft/unispeech-large-1500h-cv")
>>> # TODO: Add full pretraining example
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.unispeech(
input_values,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
transformer_features = outputs[0]
# quantize all (unmasked) extracted features and project to final vq dim
extract_features = self.dropout_features(outputs[1])
quantized_features, codevector_perplexity = self.quantizer(extract_features)
# project quantized features twice
quantized_features = self.project_q(quantized_features.to(self.project_q.weight.dtype))
quantized_features = self.project_hid(quantized_features)
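        # With probability `config.replace_prob`, swap the transformer feature at a time step for its projected
        # quantized counterpart; the Bernoulli mask sampled below selects the replaced positions.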
prob_replace_matrix = torch.empty(transformer_features.size(0), transformer_features.size(1)).fill_(
self.config.replace_prob
)
prob_replace_matrix = prob_replace_matrix.transpose(0, 1)
sampled_replace_matrix = torch.bernoulli(prob_replace_matrix).bool().to(transformer_features.device)
sampled_replace_matrix = sampled_replace_matrix.transpose(0, 1)
sampled_replace_matrix = sampled_replace_matrix.unsqueeze(-1)
logits = transformer_features.masked_fill(sampled_replace_matrix, 0.0) + (
quantized_features.masked_fill(~sampled_replace_matrix, 0.0)
)
# project to ctc units
logits = self.dropout(logits)
logits = self.ctc_proj(logits)
# TODO(PVP) - add negative sampling & loss computation
loss = None
if not return_dict:
if loss is not None:
return (loss, transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return (transformer_features, quantized_features, codevector_perplexity) + outputs[2:]
return UniSpeechForPreTrainingOutput(
loss=loss,
projected_states=transformer_features,
projected_quantized_states=quantized_features,
codevector_perplexity=codevector_perplexity,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class UniSpeechForCTC(Wav2Vec2ForCTC):
pass
class UniSpeechForSequenceClassification(Wav2Vec2ForSequenceClassification):
pass
__all__ = [
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
| transformers/src/transformers/models/unispeech/modular_unispeech.py/0 | {
"file_path": "transformers/src/transformers/models/unispeech/modular_unispeech.py",
"repo_id": "transformers",
"token_count": 7350
} | 514 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch UperNet model. Based on OpenMMLab's implementation, found in https://github.com/open-mmlab/mmsegmentation."""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import auto_docstring
from ...utils.backbone_utils import load_backbone
from .configuration_upernet import UperNetConfig
class UperNetConvModule(nn.Module):
"""
A convolutional block that bundles conv/norm/activation layers. This block simplifies the usage of convolution
layers, which are commonly used with a norm layer (e.g., BatchNorm) and activation layer (e.g., ReLU).
"""
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: Union[int, tuple[int, int]],
padding: Union[int, tuple[int, int], str] = 0,
bias: bool = False,
dilation: Union[int, tuple[int, int]] = 1,
) -> None:
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
padding=padding,
bias=bias,
dilation=dilation,
)
self.batch_norm = nn.BatchNorm2d(out_channels)
self.activation = nn.ReLU()
def forward(self, input: torch.Tensor) -> torch.Tensor:
output = self.conv(input)
output = self.batch_norm(output)
output = self.activation(output)
return output
class UperNetPyramidPoolingBlock(nn.Module):
def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
super().__init__()
self.layers = [
nn.AdaptiveAvgPool2d(pool_scale),
UperNetConvModule(in_channels, channels, kernel_size=1),
]
for i, layer in enumerate(self.layers):
self.add_module(str(i), layer)
def forward(self, input: torch.Tensor) -> torch.Tensor:
hidden_state = input
for layer in self.layers:
hidden_state = layer(hidden_state)
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
"""
Pyramid Pooling Module (PPM) used in PSPNet.
Args:
pool_scales (`tuple[int]`):
Pooling scales used in Pooling Pyramid Module.
in_channels (`int`):
Input channels.
channels (`int`):
Channels after modules, before conv_seg.
align_corners (`bool`):
align_corners argument of F.interpolate.
"""
def __init__(self, pool_scales: tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
super().__init__()
self.pool_scales = pool_scales
self.align_corners = align_corners
self.in_channels = in_channels
self.channels = channels
self.blocks = []
for i, pool_scale in enumerate(pool_scales):
block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
self.blocks.append(block)
self.add_module(str(i), block)
def forward(self, x: torch.Tensor) -> list[torch.Tensor]:
ppm_outs = []
for ppm in self.blocks:
ppm_out = ppm(x)
upsampled_ppm_out = nn.functional.interpolate(
ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
)
ppm_outs.append(upsampled_ppm_out)
return ppm_outs
class UperNetHead(nn.Module):
"""
Unified Perceptual Parsing for Scene Understanding. This head is the implementation of
[UPerNet](https://huggingface.co/papers/1807.10221).
"""
def __init__(self, config, in_channels):
super().__init__()
self.config = config
self.pool_scales = config.pool_scales # e.g. (1, 2, 3, 6)
self.in_channels = in_channels
self.channels = config.hidden_size
self.align_corners = False
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
# PSP Module
self.psp_modules = UperNetPyramidPoolingModule(
self.pool_scales,
self.in_channels[-1],
self.channels,
align_corners=self.align_corners,
)
self.bottleneck = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
# FPN Module
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
self.fpn_bottleneck = UperNetConvModule(
len(self.in_channels) * self.channels,
self.channels,
kernel_size=3,
padding=1,
)
def psp_forward(self, inputs):
x = inputs[-1]
psp_outs = [x]
psp_outs.extend(self.psp_modules(x))
psp_outs = torch.cat(psp_outs, dim=1)
output = self.bottleneck(psp_outs)
return output
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# build laterals
laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
laterals.append(self.psp_forward(encoder_hidden_states))
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
)
# build outputs
fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
# append psp feature
fpn_outs.append(laterals[-1])
for i in range(used_backbone_levels - 1, 0, -1):
fpn_outs[i] = nn.functional.interpolate(
fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
)
fpn_outs = torch.cat(fpn_outs, dim=1)
output = self.fpn_bottleneck(fpn_outs)
output = self.classifier(output)
return output
class UperNetFCNHead(nn.Module):
"""
    Fully Convolutional Networks for Semantic Segmentation. This head is the implementation of
    [FCNNet](https://huggingface.co/papers/1411.4038).
Args:
config:
Configuration.
in_channels (int):
Number of input channels.
kernel_size (int):
The kernel size for convs in the head. Default: 3.
dilation (int):
The dilation rate for convs in the head. Default: 1.
"""
def __init__(
self, config, in_channels, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, tuple[int, int]] = 1
) -> None:
super().__init__()
self.config = config
self.in_channels = (
in_channels[in_index] if config.auxiliary_in_channels is None else config.auxiliary_in_channels
)
self.channels = config.auxiliary_channels
self.num_convs = config.auxiliary_num_convs
self.concat_input = config.auxiliary_concat_input
self.in_index = in_index
conv_padding = (kernel_size // 2) * dilation
convs = []
convs.append(
UperNetConvModule(
self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
)
)
for i in range(self.num_convs - 1):
convs.append(
UperNetConvModule(
self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
)
)
if self.num_convs == 0:
self.convs = nn.Identity()
else:
self.convs = nn.Sequential(*convs)
if self.concat_input:
self.conv_cat = UperNetConvModule(
self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
)
self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)
def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
# just take the relevant feature maps
hidden_states = encoder_hidden_states[self.in_index]
output = self.convs(hidden_states)
if self.concat_input:
output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
output = self.classifier(output)
return output
@auto_docstring
class UperNetPreTrainedModel(PreTrainedModel):
config: UperNetConfig
main_input_name = "pixel_values"
_no_split_modules = []
def _init_weights(self, module):
if isinstance(module, nn.Conv2d):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1.0)
module.bias.data.zero_()
@auto_docstring(
custom_intro="""
UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.
"""
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.backbone = load_backbone(config)
# Semantic segmentation head(s)
self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
self.auxiliary_head = (
UperNetFCNHead(config, in_channels=self.backbone.channels) if config.use_auxiliary_head else None
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SemanticSegmenterOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
>>> from PIL import Image
>>> from huggingface_hub import hf_hub_download
>>> image_processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
>>> model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
>>> filepath = hf_hub_download(
... repo_id="hf-internal-testing/fixtures_ade20k", filename="ADE_val_00000001.jpg", repo_type="dataset"
... )
>>> image = Image.open(filepath).convert("RGB")
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits # shape (batch_size, num_labels, height, width)
>>> list(logits.shape)
[1, 150, 512, 512]
```"""
if labels is not None and self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
outputs = self.backbone.forward_with_filtered_kwargs(
pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
)
features = outputs.feature_maps
logits = self.decode_head(features)
logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)
auxiliary_logits = None
if self.auxiliary_head is not None:
auxiliary_logits = self.auxiliary_head(features)
auxiliary_logits = nn.functional.interpolate(
auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
)
loss = None
if labels is not None:
# compute weighted loss
loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
loss = loss_fct(logits, labels)
if auxiliary_logits is not None:
auxiliary_loss = loss_fct(auxiliary_logits, labels)
loss += self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
output = (logits,) + outputs[1:]
else:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = ["UperNetForSemanticSegmentation", "UperNetPreTrainedModel"]
| transformers/src/transformers/models/upernet/modeling_upernet.py/0 | {
"file_path": "transformers/src/transformers/models/upernet/modeling_upernet.py",
"repo_id": "transformers",
"token_count": 6472
} | 515 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ViLT checkpoints from the original Github repository."""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
rename_keys = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
)
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
)
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
)
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))
# embeddings
rename_keys.extend(
[
# text embeddings
("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
(
"text_embeddings.position_embeddings.weight",
"vilt.embeddings.text_embeddings.position_embeddings.weight",
),
("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
(
"text_embeddings.token_type_embeddings.weight",
"vilt.embeddings.text_embeddings.token_type_embeddings.weight",
),
("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
# patch embeddings
("transformer.cls_token", "vilt.embeddings.cls_token"),
("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
# token type embeddings
("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
]
)
# final layernorm + pooler
rename_keys.extend(
[
("transformer.norm.weight", "vilt.layernorm.weight"),
("transformer.norm.bias", "vilt.layernorm.bias"),
("pooler.dense.weight", "vilt.pooler.dense.weight"),
("pooler.dense.bias", "vilt.pooler.dense.bias"),
]
)
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("vqa_classifier.0.weight", "classifier.0.weight"),
("vqa_classifier.0.bias", "classifier.0.bias"),
("vqa_classifier.1.weight", "classifier.1.weight"),
("vqa_classifier.1.bias", "classifier.1.bias"),
("vqa_classifier.3.weight", "classifier.3.weight"),
("vqa_classifier.3.bias", "classifier.3.bias"),
]
)
elif nlvr_model:
# classification head
rename_keys.extend(
[
("nlvr2_classifier.0.weight", "classifier.0.weight"),
("nlvr2_classifier.0.bias", "classifier.0.bias"),
("nlvr2_classifier.1.weight", "classifier.1.weight"),
("nlvr2_classifier.1.bias", "classifier.1.bias"),
("nlvr2_classifier.3.weight", "classifier.3.weight"),
("nlvr2_classifier.3.bias", "classifier.3.bias"),
]
)
else:
pass
return rename_keys
# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config):
for i in range(config.num_hidden_layers):
prefix = "vilt."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight")
in_proj_bias = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias")
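        # in_proj_weight has shape (3 * hidden_size, hidden_size): the first, second and third blocks of
        # hidden_size rows hold the query, key and value projections, respectively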
# next, add query, keys and values (in that order) to the state dict
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
: config.hidden_size, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
ignore_keys = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(k, None)
def rename_key(dct, old, new):
val = dct.pop(old)
dct[new] = val
@torch.no_grad()
def convert_vilt_checkpoint(checkpoint_url, pytorch_dump_folder_path):
"""
Copy/paste/tweak model's weights to our ViLT structure.
"""
# define configuration and initialize HuggingFace model
config = ViltConfig(image_size=384, patch_size=32, tie_word_embeddings=False)
mlm_model = False
vqa_model = False
nlvr_model = False
irtr_model = False
if "vqa" in checkpoint_url:
vqa_model = True
config.num_labels = 3129
repo_id = "huggingface/label-files"
filename = "vqa2-id2label.json"
id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
id2label = {int(k): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
model = ViltForQuestionAnswering(config)
elif "nlvr" in checkpoint_url:
nlvr_model = True
config.num_labels = 2
config.id2label = {0: "False", 1: "True"}
config.label2id = {v: k for k, v in config.id2label.items()}
config.modality_type_vocab_size = 3
model = ViltForImagesAndTextClassification(config)
elif "irtr" in checkpoint_url:
irtr_model = True
model = ViltForImageAndTextRetrieval(config)
elif "mlm_itm" in checkpoint_url:
mlm_model = True
model = ViltForMaskedLM(config)
else:
raise ValueError("Unknown model type")
# load state_dict of original model, remove and rename some keys
state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["state_dict"]
rename_keys = create_rename_keys(config, vqa_model, nlvr_model, irtr_model)
for src, dest in rename_keys:
rename_key(state_dict, src, dest)
read_in_q_k_v(state_dict, config)
if mlm_model or irtr_model:
ignore_keys = ["itm_score.fc.weight", "itm_score.fc.bias"]
for k in ignore_keys:
state_dict.pop(k, None)
# load state dict into HuggingFace model
model.eval()
if mlm_model:
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(state_dict)
# Define processor
image_processor = ViltImageProcessor(size=384)
tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-uncased")
processor = ViltProcessor(image_processor, tokenizer)
# Forward pass on example inputs (image + text)
if nlvr_model:
image1 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
image2 = Image.open(requests.get("https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg", stream=True).raw)
text = (
"The left image contains twice the number of dogs as the right image, and at least two dogs in total are"
" standing."
)
encoding_1 = processor(image1, text, return_tensors="pt")
encoding_2 = processor(image2, text, return_tensors="pt")
outputs = model(
input_ids=encoding_1.input_ids,
pixel_values=encoding_1.pixel_values,
pixel_values_2=encoding_2.pixel_values,
)
else:
image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
if mlm_model:
text = "a bunch of [MASK] laying on a [MASK]."
else:
text = "How many cats are there?"
encoding = processor(image, text, return_tensors="pt")
outputs = model(**encoding)
# Verify outputs
if mlm_model:
expected_shape = torch.Size([1, 11, 30522])
expected_slice = torch.tensor([-12.5061, -12.5123, -12.5174])
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3], expected_slice, atol=1e-4)
# verify masked token prediction equals "cats"
predicted_id = outputs.logits[0, 4, :].argmax(-1).item()
assert tokenizer.decode([predicted_id]) == "cats"
elif vqa_model:
expected_shape = torch.Size([1, 3129])
expected_slice = torch.tensor([-15.9495, -18.1472, -10.3041])
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
# verify vqa prediction equals "2"
predicted_idx = outputs.logits.argmax(-1).item()
assert model.config.id2label[predicted_idx] == "2"
elif nlvr_model:
expected_shape = torch.Size([1, 2])
expected_slice = torch.tensor([-2.8721, 2.1291])
assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
assert outputs.logits.shape == expected_shape
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
print(f"Saving model and processor to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path)
processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
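    # Example invocation (a sketch; the URL is simply this script's default checkpoint and the output folder
    # name is illustrative):
    #   python convert_vilt_original_to_pytorch.py \
    #       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
    #       --pytorch_dump_folder_path ./vilt-mlm-itm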
| transformers/src/transformers/models/vilt/convert_vilt_original_to_pytorch.py/0 | {
"file_path": "transformers/src/transformers/models/vilt/convert_vilt_original_to_pytorch.py",
"repo_id": "transformers",
"token_count": 5694
} | 516 |
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert VITS checkpoint."""
import argparse
import json
import tempfile
import torch
from huggingface_hub import hf_hub_download
from transformers import VitsConfig, VitsModel, VitsTokenizer, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.vits")
MAPPING_TEXT_ENCODER = {
"enc_p.emb": "text_encoder.embed_tokens",
"enc_p.encoder.attn_layers.*.conv_k": "text_encoder.encoder.layers.*.attention.k_proj",
"enc_p.encoder.attn_layers.*.conv_v": "text_encoder.encoder.layers.*.attention.v_proj",
"enc_p.encoder.attn_layers.*.conv_q": "text_encoder.encoder.layers.*.attention.q_proj",
"enc_p.encoder.attn_layers.*.conv_o": "text_encoder.encoder.layers.*.attention.out_proj",
"enc_p.encoder.attn_layers.*.emb_rel_k": "text_encoder.encoder.layers.*.attention.emb_rel_k",
"enc_p.encoder.attn_layers.*.emb_rel_v": "text_encoder.encoder.layers.*.attention.emb_rel_v",
"enc_p.encoder.norm_layers_1.*.gamma": "text_encoder.encoder.layers.*.layer_norm.weight",
"enc_p.encoder.norm_layers_1.*.beta": "text_encoder.encoder.layers.*.layer_norm.bias",
"enc_p.encoder.ffn_layers.*.conv_1": "text_encoder.encoder.layers.*.feed_forward.conv_1",
"enc_p.encoder.ffn_layers.*.conv_2": "text_encoder.encoder.layers.*.feed_forward.conv_2",
"enc_p.encoder.norm_layers_2.*.gamma": "text_encoder.encoder.layers.*.final_layer_norm.weight",
"enc_p.encoder.norm_layers_2.*.beta": "text_encoder.encoder.layers.*.final_layer_norm.bias",
"enc_p.proj": "text_encoder.project",
}
MAPPING_STOCHASTIC_DURATION_PREDICTOR = {
"dp.pre": "duration_predictor.conv_pre",
"dp.proj": "duration_predictor.conv_proj",
"dp.convs.convs_sep.*": "duration_predictor.conv_dds.convs_dilated.*",
"dp.convs.convs_1x1.*": "duration_predictor.conv_dds.convs_pointwise.*",
"dp.convs.norms_1.*.gamma": "duration_predictor.conv_dds.norms_1.*.weight",
"dp.convs.norms_1.*.beta": "duration_predictor.conv_dds.norms_1.*.bias",
"dp.convs.norms_2.*.gamma": "duration_predictor.conv_dds.norms_2.*.weight",
"dp.convs.norms_2.*.beta": "duration_predictor.conv_dds.norms_2.*.bias",
"dp.flows.0.logs": "duration_predictor.flows.0.log_scale",
"dp.flows.0.m": "duration_predictor.flows.0.translate",
"dp.flows.*.pre": "duration_predictor.flows.*.conv_pre",
"dp.flows.*.proj": "duration_predictor.flows.*.conv_proj",
"dp.flows.*.convs.convs_1x1.0": "duration_predictor.flows.*.conv_dds.convs_pointwise.0",
"dp.flows.*.convs.convs_1x1.1": "duration_predictor.flows.*.conv_dds.convs_pointwise.1",
"dp.flows.*.convs.convs_1x1.2": "duration_predictor.flows.*.conv_dds.convs_pointwise.2",
"dp.flows.*.convs.convs_sep.0": "duration_predictor.flows.*.conv_dds.convs_dilated.0",
"dp.flows.*.convs.convs_sep.1": "duration_predictor.flows.*.conv_dds.convs_dilated.1",
"dp.flows.*.convs.convs_sep.2": "duration_predictor.flows.*.conv_dds.convs_dilated.2",
"dp.flows.*.convs.norms_1.0.gamma": "duration_predictor.flows.*.conv_dds.norms_1.0.weight",
"dp.flows.*.convs.norms_1.0.beta": "duration_predictor.flows.*.conv_dds.norms_1.0.bias",
"dp.flows.*.convs.norms_1.1.gamma": "duration_predictor.flows.*.conv_dds.norms_1.1.weight",
"dp.flows.*.convs.norms_1.1.beta": "duration_predictor.flows.*.conv_dds.norms_1.1.bias",
"dp.flows.*.convs.norms_1.2.gamma": "duration_predictor.flows.*.conv_dds.norms_1.2.weight",
"dp.flows.*.convs.norms_1.2.beta": "duration_predictor.flows.*.conv_dds.norms_1.2.bias",
"dp.flows.*.convs.norms_2.0.gamma": "duration_predictor.flows.*.conv_dds.norms_2.0.weight",
"dp.flows.*.convs.norms_2.0.beta": "duration_predictor.flows.*.conv_dds.norms_2.0.bias",
"dp.flows.*.convs.norms_2.1.gamma": "duration_predictor.flows.*.conv_dds.norms_2.1.weight",
"dp.flows.*.convs.norms_2.1.beta": "duration_predictor.flows.*.conv_dds.norms_2.1.bias",
"dp.flows.*.convs.norms_2.2.gamma": "duration_predictor.flows.*.conv_dds.norms_2.2.weight",
"dp.flows.*.convs.norms_2.2.beta": "duration_predictor.flows.*.conv_dds.norms_2.2.bias",
"dp.post_pre": "duration_predictor.post_conv_pre",
"dp.post_proj": "duration_predictor.post_conv_proj",
"dp.post_convs.convs_sep.*": "duration_predictor.post_conv_dds.convs_dilated.*",
"dp.post_convs.convs_1x1.*": "duration_predictor.post_conv_dds.convs_pointwise.*",
"dp.post_convs.norms_1.*.gamma": "duration_predictor.post_conv_dds.norms_1.*.weight",
"dp.post_convs.norms_1.*.beta": "duration_predictor.post_conv_dds.norms_1.*.bias",
"dp.post_convs.norms_2.*.gamma": "duration_predictor.post_conv_dds.norms_2.*.weight",
"dp.post_convs.norms_2.*.beta": "duration_predictor.post_conv_dds.norms_2.*.bias",
"dp.post_flows.0.logs": "duration_predictor.post_flows.0.log_scale",
"dp.post_flows.0.m": "duration_predictor.post_flows.0.translate",
"dp.post_flows.*.pre": "duration_predictor.post_flows.*.conv_pre",
"dp.post_flows.*.proj": "duration_predictor.post_flows.*.conv_proj",
"dp.post_flows.*.convs.convs_1x1.0": "duration_predictor.post_flows.*.conv_dds.convs_pointwise.0",
"dp.post_flows.*.convs.convs_1x1.1": "duration_predictor.post_flows.*.conv_dds.convs_pointwise.1",
"dp.post_flows.*.convs.convs_1x1.2": "duration_predictor.post_flows.*.conv_dds.convs_pointwise.2",
"dp.post_flows.*.convs.convs_sep.0": "duration_predictor.post_flows.*.conv_dds.convs_dilated.0",
"dp.post_flows.*.convs.convs_sep.1": "duration_predictor.post_flows.*.conv_dds.convs_dilated.1",
"dp.post_flows.*.convs.convs_sep.2": "duration_predictor.post_flows.*.conv_dds.convs_dilated.2",
"dp.post_flows.*.convs.norms_1.0.gamma": "duration_predictor.post_flows.*.conv_dds.norms_1.0.weight",
"dp.post_flows.*.convs.norms_1.0.beta": "duration_predictor.post_flows.*.conv_dds.norms_1.0.bias",
"dp.post_flows.*.convs.norms_1.1.gamma": "duration_predictor.post_flows.*.conv_dds.norms_1.1.weight",
"dp.post_flows.*.convs.norms_1.1.beta": "duration_predictor.post_flows.*.conv_dds.norms_1.1.bias",
"dp.post_flows.*.convs.norms_1.2.gamma": "duration_predictor.post_flows.*.conv_dds.norms_1.2.weight",
"dp.post_flows.*.convs.norms_1.2.beta": "duration_predictor.post_flows.*.conv_dds.norms_1.2.bias",
"dp.post_flows.*.convs.norms_2.0.gamma": "duration_predictor.post_flows.*.conv_dds.norms_2.0.weight",
"dp.post_flows.*.convs.norms_2.0.beta": "duration_predictor.post_flows.*.conv_dds.norms_2.0.bias",
"dp.post_flows.*.convs.norms_2.1.gamma": "duration_predictor.post_flows.*.conv_dds.norms_2.1.weight",
"dp.post_flows.*.convs.norms_2.1.beta": "duration_predictor.post_flows.*.conv_dds.norms_2.1.bias",
"dp.post_flows.*.convs.norms_2.2.gamma": "duration_predictor.post_flows.*.conv_dds.norms_2.2.weight",
"dp.post_flows.*.convs.norms_2.2.beta": "duration_predictor.post_flows.*.conv_dds.norms_2.2.bias",
"dp.cond": "duration_predictor.cond", # num_speakers > 1
}
MAPPING_FLOW = {
"flow.flows.*.pre": "flow.flows.*.conv_pre",
"flow.flows.*.enc.in_layers.0": "flow.flows.*.wavenet.in_layers.0",
"flow.flows.*.enc.in_layers.1": "flow.flows.*.wavenet.in_layers.1",
"flow.flows.*.enc.in_layers.2": "flow.flows.*.wavenet.in_layers.2",
"flow.flows.*.enc.in_layers.3": "flow.flows.*.wavenet.in_layers.3",
"flow.flows.*.enc.res_skip_layers.0": "flow.flows.*.wavenet.res_skip_layers.0",
"flow.flows.*.enc.res_skip_layers.1": "flow.flows.*.wavenet.res_skip_layers.1",
"flow.flows.*.enc.res_skip_layers.2": "flow.flows.*.wavenet.res_skip_layers.2",
"flow.flows.*.enc.res_skip_layers.3": "flow.flows.*.wavenet.res_skip_layers.3",
"flow.flows.*.enc.cond_layer": "flow.flows.*.wavenet.cond_layer", # num_speakers > 1
"flow.flows.*.post": "flow.flows.*.conv_post",
}
MAPPING_GENERATOR = {
"dec.conv_pre": "decoder.conv_pre",
"dec.ups.0": "decoder.upsampler.0",
"dec.ups.1": "decoder.upsampler.1",
"dec.ups.2": "decoder.upsampler.2",
"dec.ups.3": "decoder.upsampler.3",
"dec.resblocks.*.convs1.0": "decoder.resblocks.*.convs1.0",
"dec.resblocks.*.convs1.1": "decoder.resblocks.*.convs1.1",
"dec.resblocks.*.convs1.2": "decoder.resblocks.*.convs1.2",
"dec.resblocks.*.convs2.0": "decoder.resblocks.*.convs2.0",
"dec.resblocks.*.convs2.1": "decoder.resblocks.*.convs2.1",
"dec.resblocks.*.convs2.2": "decoder.resblocks.*.convs2.2",
"dec.conv_post": "decoder.conv_post",
"dec.cond": "decoder.cond", # num_speakers > 1
}
MAPPING_POSTERIOR_ENCODER = {
"enc_q.pre": "posterior_encoder.conv_pre",
"enc_q.enc.in_layers.*": "posterior_encoder.wavenet.in_layers.*",
"enc_q.enc.res_skip_layers.*": "posterior_encoder.wavenet.res_skip_layers.*",
"enc_q.enc.cond_layer": "posterior_encoder.wavenet.cond_layer", # num_speakers > 1
"enc_q.proj": "posterior_encoder.conv_proj",
}
MAPPING = {
**MAPPING_TEXT_ENCODER,
**MAPPING_STOCHASTIC_DURATION_PREDICTOR,
**MAPPING_FLOW,
**MAPPING_GENERATOR,
**MAPPING_POSTERIOR_ENCODER,
"emb_g": "embed_speaker", # num_speakers > 1
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
for attribute in key.split("."):
hf_pointer = getattr(hf_pointer, attribute)
if weight_type is not None:
hf_shape = getattr(hf_pointer, weight_type).shape
else:
hf_shape = hf_pointer.shape
# strip off the kernel dimension at the end (original weights are Conv1d)
if key.endswith(".k_proj") or key.endswith(".v_proj") or key.endswith(".q_proj") or key.endswith(".out_proj"):
value = value.squeeze(-1)
if hf_shape != value.shape:
raise ValueError(
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
elif weight_type == "running_mean":
hf_pointer.running_mean.data = value
elif weight_type == "running_var":
hf_pointer.running_var.data = value
elif weight_type == "num_batches_tracked":
hf_pointer.num_batches_tracked.data = value
else:
hf_pointer.data = value
logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
for key in ignore_keys:
if key.endswith(".*"):
if name.startswith(key[:-1]):
return True
elif ".*." in key:
prefix, suffix = key.split(".*.")
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def recursively_load_weights(fairseq_dict, hf_model):
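    # Wildcard example: "enc_p.encoder.attn_layers.3.conv_q.weight" matches the MAPPING entry
    # "enc_p.encoder.attn_layers.*.conv_q" and is loaded into
    # "text_encoder.encoder.layers.3.attention.q_proj.weight" (the trailing Conv1d kernel dim is squeezed).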
unused_weights = []
for name, value in fairseq_dict.items():
if should_ignore(name, IGNORE_KEYS):
logger.info(f"{name} was ignored")
continue
is_used = False
for key, mapped_key in MAPPING.items():
if key.endswith(".*"):
key = key[:-1]
elif "*" in key:
prefix, suffix = key.split(".*.")
if prefix in name and suffix in name:
key = suffix
if key in name:
is_used = True
if mapped_key.endswith(".*"):
layer_index = name.split(key)[-1].split(".")[0]
mapped_key = mapped_key.replace("*", layer_index)
elif "*" in mapped_key:
layer_index = name.split(key)[0].split(".")[-2]
# remap the layer index since we removed the Flip layers
if "flow.flows" in mapped_key:
layer_index = str(int(layer_index) // 2)
if "duration_predictor.flows" in mapped_key or "duration_predictor.post_flows" in mapped_key:
layer_index = str(int(layer_index) // 2 + 1)
mapped_key = mapped_key.replace("*", layer_index)
if "weight_g" in name:
weight_type = "weight_g"
elif "weight_v" in name:
weight_type = "weight_v"
elif "bias" in name:
weight_type = "bias"
elif "weight" in name:
weight_type = "weight"
elif "running_mean" in name:
weight_type = "running_mean"
elif "running_var" in name:
weight_type = "running_var"
elif "num_batches_tracked" in name:
weight_type = "num_batches_tracked"
else:
weight_type = None
set_recursively(hf_model, mapped_key, value, name, weight_type)
continue
if not is_used:
unused_weights.append(name)
logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
pytorch_dump_folder_path,
checkpoint_path=None,
config_path=None,
vocab_path=None,
language=None,
num_speakers=None,
sampling_rate=None,
repo_id=None,
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if config_path is not None:
config = VitsConfig.from_pretrained(config_path)
else:
config = VitsConfig()
if num_speakers:
config.num_speakers = num_speakers
config.speaker_embedding_size = 256
if sampling_rate:
config.sampling_rate = sampling_rate
if checkpoint_path is None:
logger.info(f"***Converting model: facebook/mms-tts {language}***")
vocab_path = hf_hub_download(
repo_id="facebook/mms-tts",
filename="vocab.txt",
subfolder=f"models/{language}",
)
config_file = hf_hub_download(
repo_id="facebook/mms-tts",
filename="config.json",
subfolder=f"models/{language}",
)
checkpoint_path = hf_hub_download(
repo_id="facebook/mms-tts",
filename="G_100000.pth",
subfolder=f"models/{language}",
)
with open(config_file, "r") as f:
data = f.read()
hps = json.loads(data)
is_uroman = hps["data"]["training_files"].split(".")[-1] == "uroman"
if is_uroman:
logger.warning("For this checkpoint, you should use `uroman` to convert input text before tokenizing it!")
else:
logger.info(f"***Converting model: {checkpoint_path}***")
is_uroman = False
# original VITS checkpoint
if vocab_path is None:
_pad = "_"
_punctuation = ';:,.!?¡¿—…"«»“” '
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"
symbols = _pad + _punctuation + _letters + _letters_ipa
symbol_to_id = {s: i for i, s in enumerate(symbols)}
phonemize = True
else:
# Save vocab as temporary json file
symbols = [line.replace("\n", "") for line in open(vocab_path, encoding="utf-8").readlines()]
symbol_to_id = {s: i for i, s in enumerate(symbols)}
        # MMS-TTS does not use a <pad> token, so we set it to the token used to space characters
_pad = symbols[0]
phonemize = False
with tempfile.NamedTemporaryFile() as tf:
with open(tf.name, "w", encoding="utf-8") as f:
f.write(json.dumps(symbol_to_id, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
tokenizer = VitsTokenizer(tf.name, language=language, phonemize=phonemize, is_uroman=is_uroman, pad_token=_pad)
config.vocab_size = len(symbols)
model = VitsModel(config)
model.decoder.apply_weight_norm()
orig_checkpoint = torch.load(checkpoint_path, map_location=torch.device("cpu"), weights_only=True)
recursively_load_weights(orig_checkpoint["model"], model)
model.decoder.remove_weight_norm()
model.save_pretrained(pytorch_dump_folder_path)
tokenizer.save_pretrained(pytorch_dump_folder_path)
if repo_id:
print("Pushing to the hub...")
tokenizer.push_to_hub(repo_id)
model.push_to_hub(repo_id)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", default=None, type=str, help="Local path to original checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to vocab.txt")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--language", default=None, type=str, help="Tokenizer language (three-letter code)")
parser.add_argument("--num_speakers", default=None, type=int, help="Number of speakers")
parser.add_argument(
"--sampling_rate", default=None, type=int, help="Sampling rate on which the model was trained."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
args = parser.parse_args()
convert_checkpoint(
args.pytorch_dump_folder_path,
args.checkpoint_path,
args.config_path,
args.vocab_path,
args.language,
args.num_speakers,
args.sampling_rate,
args.push_to_hub,
)
| transformers/src/transformers/models/vits/convert_original_checkpoint.py/0 | {
"file_path": "transformers/src/transformers/models/vits/convert_original_checkpoint.py",
"repo_id": "transformers",
"token_count": 8728
} | 517 |
# coding=utf-8
# Copyright 2025 HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import gc
import json
import os
import re
import torch
from safetensors.torch import load_file
from transformers import (
MistralCommonTokenizer,
VoxtralConfig,
VoxtralForConditionalGeneration,
VoxtralProcessor,
WhisperFeatureExtractor,
)
from transformers.models.whisper.modeling_whisper import sinusoids
from transformers.utils.hub import cached_file
# fmt: off
STATE_DICT_MAPPING = {
# Text model keys
r"^output.weight": r"language_model.lm_head.weight",
r"^norm.weight": r"language_model.model.norm.weight",
r"^tok_embeddings.weight": r"language_model.model.embed_tokens.weight",
r"^layers.(\d+).attention_norm.weight": r"language_model.model.layers.\1.input_layernorm.weight",
r"^layers.(\d+).ffn_norm.weight": r"language_model.model.layers.\1.post_attention_layernorm.weight",
r"^layers.(\d+).attention.w(q|k|v|o).weight": r"language_model.model.layers.\1.self_attn.\2_proj.weight",
r"^layers.(\d+).feed_forward.w1.weight": r"language_model.model.layers.\1.mlp.gate_proj.weight",
r"^layers.(\d+).feed_forward.w2.weight": r"language_model.model.layers.\1.mlp.down_proj.weight",
r"^layers.(\d+).feed_forward.w3.weight": r"language_model.model.layers.\1.mlp.up_proj.weight",
r"mm_whisper_embeddings.tok_embeddings.weight": r"language_model.model.embed_tokens.weight",
# audio model keys
r"mm_whisper_embeddings.whisper_encoder\.conv_layers\.0\.(weight|bias)": r"audio_tower.conv1.\1",
r"mm_whisper_embeddings.whisper_encoder\.conv_layers\.1\.(weight|bias)": r"audio_tower.conv2.\1",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.norm\.(weight|bias)": r"audio_tower.layer_norm.\1",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.attention\.w([qkv])\.(weight|bias)": r"audio_tower.layers.\1.self_attn.\2_proj.\3",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.attention\.wo\.(weight|bias)": r"audio_tower.layers.\1.self_attn.out_proj.\2",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.attention_norm\.(weight|bias)": r"audio_tower.layers.\1.self_attn_layer_norm.\2",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w1\.(weight|bias)": r"audio_tower.layers.\1.fc1.\2",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.feed_forward\.w2\.(weight|bias)": r"audio_tower.layers.\1.fc2.\2",
r"mm_whisper_embeddings.whisper_encoder\.transformer\.layers\.(\d+)\.ffn_norm\.(weight|bias)": r"audio_tower.layers.\1.final_layer_norm.\2",
r"mm_whisper_embeddings.audio_language_projection\.0\.weight": r"multi_modal_projector.linear_1.weight",
r"mm_whisper_embeddings.audio_language_projection\.2\.weight": r"multi_modal_projector.linear_2.weight",
}
# fmt: on
def convert_config(original_config: dict, max_position_embeddings: int = 131072):
original_audio_config = original_config.pop("multimodal")
original_audio_config = original_audio_config["whisper_model_args"]["encoder_args"]
original_text_config = original_config
# Text config
text_key_mapping = {
"hidden_size": "dim",
"num_hidden_layers": "n_layers",
"intermediate_size": "hidden_dim",
"num_attention_heads": "n_heads",
"num_key_value_heads": "n_kv_heads",
"rms_norm_eps": "norm_eps",
}
similar_text_keys_to_keep = [
"head_dim",
"vocab_size",
"rope_theta",
]
new_text_config_kwargs = {k: original_text_config[v] for k, v in text_key_mapping.items()}
new_text_config_kwargs.update({k: v for k, v in original_text_config.items() if k in similar_text_keys_to_keep})
# These are not always defined depending on `params.json`
new_text_config_kwargs["sliding_window"] = original_text_config.get("sliding_window", None)
new_text_config_kwargs["max_position_embeddings"] = original_text_config.get(
"max_seq_len", max_position_embeddings
)
# This may sometimes be a string in `params.json`
if new_text_config_kwargs["sliding_window"] is not None:
new_text_config_kwargs["sliding_window"] = int(new_text_config_kwargs["sliding_window"])
# Audio config
audio_key_mapping = {
"hidden_size": "dim",
"num_hidden_layers": "n_layers",
"intermediate_size": "hidden_dim",
"num_attention_heads": "n_heads",
"num_key_value_heads": "n_heads",
}
similar_audio_keys_to_keep = [
"head_dim",
"vocab_size",
]
new_audio_config_kwargs = {k: original_audio_config[v] for k, v in audio_key_mapping.items()}
new_audio_config_kwargs.update({k: v for k, v in original_audio_config.items() if k in similar_audio_keys_to_keep})
new_config = VoxtralConfig(
audio_config=new_audio_config_kwargs,
text_config=new_text_config_kwargs,
audio_token_id=24,
projector_hidden_act="gelu",
)
return new_config
def map_old_key_to_new(old_key):
"""Map of a key of the original state dict to the equivalent key in HF format"""
for pattern, replacement in STATE_DICT_MAPPING.items():
new_key, n_replace = re.subn(pattern, replacement, old_key)
# Early exit of the loop
if n_replace > 0:
return new_key
raise ValueError(f"Key: {old_key} could not be mapped (check the mapping).")
def permute_for_rope(tensor, n_heads, dim1, dim2):
"""Permute the weights for the ROPE formulation."""
tensor = tensor.view(n_heads, dim1 // n_heads // 2, 2, dim2)
tensor = tensor.transpose(1, 2)
tensor = tensor.reshape(dim1, dim2)
return tensor
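# Sketch of what the permutation above does: the original checkpoint stores q/k rows in the
# "interleaved pairs" RoPE layout, and the view/transpose/reshape reorders them into the
# "split halves" layout expected by the HF rotary implementation. Shapes are unchanged: a
# (dim1, dim2) weight stays (dim1, dim2).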
def convert_state_dict(original_state_dict, config):
"""Convert a state dict file, when a single `nn.Module` is never sharded in different files (usual case)."""
new_dict = {}
num_attention_heads = config.num_attention_heads
hidden_size = config.hidden_size
head_dim = config.head_dim
num_key_value_heads = config.num_key_value_heads
key_value_dim = head_dim * num_key_value_heads
query_dim = head_dim * num_attention_heads
for old_key, tensor in original_state_dict.items():
new_key = map_old_key_to_new(old_key)
if "audio_tower" not in new_key:
if "q_proj" in new_key:
tensor = tensor.view(num_attention_heads, head_dim, hidden_size).reshape(query_dim, hidden_size)
tensor = permute_for_rope(tensor, num_attention_heads, query_dim, hidden_size)
elif "k_proj" in new_key:
tensor = tensor.view(num_key_value_heads, head_dim, hidden_size).reshape(key_value_dim, hidden_size)
tensor = permute_for_rope(tensor, num_key_value_heads, key_value_dim, hidden_size)
elif "v_proj" in new_key:
tensor = tensor.view(num_key_value_heads, head_dim, hidden_size).reshape(key_value_dim, hidden_size)
new_dict[new_key] = tensor
return new_dict
def write_model(
input_path_or_repo,
model_name,
config_name,
output_dir,
safe_serialization=True,
):
print("Converting the model.")
os.makedirs(output_dir, exist_ok=True)
# --------------
# convert config
# --------------
config_path = cached_file(input_path_or_repo, config_name)
with open(config_path, "r") as f:
original_config = json.load(f)
config = convert_config(original_config)
model = VoxtralForConditionalGeneration(config)
# ---------------
# convert weights
# ---------------
model_path = cached_file(input_path_or_repo, model_name)
print(f"Fetching all parameters from the checkpoint at {model_path}...")
state_dict = load_file(model_path)
print("Converting model...")
converted_state_dict = convert_state_dict(state_dict, config.text_config)
# we need to add embed positions as they are not in the state dict
with torch.no_grad(), torch.device("cuda"):
# TODO: @eustlb, we are here creating on GPU
        # vllm initializes on device, while we save in the state dict
embed_positions_weight = sinusoids(config.audio_config.max_source_positions, config.audio_config.hidden_size)
converted_state_dict["audio_tower.embed_positions.weight"] = embed_positions_weight.cpu()
# -------------------------
# load the weights and save
# -------------------------
print("Loading the checkpoint in a Voxtral model.")
with torch.device("meta"):
model = VoxtralForConditionalGeneration(config)
model.load_state_dict(converted_state_dict, strict=True, assign=True)
print("Checkpoint loaded successfully.")
del model.config._name_or_path
del model.generation_config._from_model_config
model.generation_config.pad_token_id = 11
print("Saving the model.")
model.save_pretrained(output_dir, safe_serialization=safe_serialization)
del state_dict, model
# Safety check: reload the converted model
gc.collect()
print("Reloading the model to check if it's saved correctly.")
VoxtralForConditionalGeneration.from_pretrained(output_dir, dtype=torch.bfloat16, device_map="auto")
print("Model reloaded successfully.")
def write_processor(input_path_or_repo: str, feature_extractor_path_or_repo: str, output_dir: str):
tokenizer = MistralCommonTokenizer.from_pretrained(input_path_or_repo)
feature_extractor = WhisperFeatureExtractor.from_pretrained(feature_extractor_path_or_repo)
print("Creating the processor...")
# Create the processor and save it
processor = VoxtralProcessor(
feature_extractor=feature_extractor,
tokenizer=tokenizer,
)
processor.save_pretrained(output_dir)
print("Processor saved successfully.")
def main():
parser = argparse.ArgumentParser(description="Convert Voxtral weights to Hugging Face format")
parser.add_argument(
"--input_path_or_repo",
type=str,
required=True,
help="Path or repo containing Csm weights",
)
parser.add_argument(
"--model_name",
type=str,
required=True,
help="Name of the model in input_path_or_repo",
)
parser.add_argument(
"--config_name",
type=str,
required=True,
help="Name of the config in input_path_or_repo",
)
parser.add_argument(
"--feature_extractor_path_or_repo",
type=str,
required=True,
help="Path or repo containing the feature extractor",
)
parser.add_argument(
"--output_dir",
help="Location to write HF model and tokenizer",
)
parser.add_argument(
"--safe_serialization", action="store_true", default=True, help="Whether or not to save using `safetensors`."
)
args = parser.parse_args()
write_model(
args.input_path_or_repo,
args.model_name,
args.config_name,
args.output_dir,
safe_serialization=args.safe_serialization,
)
write_processor(
args.input_path_or_repo,
args.feature_extractor_path_or_repo,
args.output_dir,
)
if __name__ == "__main__":
main()
| transformers/src/transformers/models/voxtral/convert_voxtral_weights_to_hf.py/0 | {
"file_path": "transformers/src/transformers/models/voxtral/convert_voxtral_weights_to_hf.py",
"repo_id": "transformers",
"token_count": 5291
} | 518 |
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Wav2Vec2Bert BERT checkpoint."""
import argparse
import torch
import torchaudio
from fairseq2.data import Collater
from fairseq2.data.audio import WaveformToFbankConverter
from fairseq2.nn.padding import get_seqs_and_padding_mask
from seamless_communication.models.conformer_shaw import load_conformer_shaw_model
from transformers import (
SeamlessM4TFeatureExtractor,
Wav2Vec2BertConfig,
Wav2Vec2BertModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
wav2vec_convert_list = [
("encoder_frontend.model_dim_proj", "feature_projection.projection"),
("encoder_frontend.post_extract_layer_norm", "feature_projection.layer_norm"),
("encoder_frontend.pos_encoder.conv", "encoder.pos_conv_embed.conv"),
("encoder.inner.layers", "encoder.layers"),
("encoder.inner_layer_norm", "encoder.layer_norm"),
("encoder.adaptor_layers", "adapter.layers"),
("inner_proj", "intermediate_dense"),
("self_attn.output_proj", "self_attn.linear_out"),
("output_proj", "output_dense"),
("self_attn.k_proj", "self_attn.linear_k"),
("self_attn.v_proj", "self_attn.linear_v"),
("self_attn.q_proj", "self_attn.linear_q"),
("self_attn.sdpa.u_bias", "self_attn.pos_bias_u"),
("self_attn.sdpa.v_bias", "self_attn.pos_bias_v"),
("self_attn.sdpa.rel_k_embed", "self_attn.distance_embedding"),
("self_attn.sdpa.r_proj", "self_attn.linear_pos"),
("conv.pointwise_conv1", "conv_module.pointwise_conv1"),
("conv.pointwise_conv2", "conv_module.pointwise_conv2"),
("conv.depthwise_conv", "conv_module.depthwise_conv"),
("conv.layer_norm", "conv_module.depthwise_layer_norm"),
("conv_layer_norm", "conv_module.layer_norm"),
("encoder.proj1", "intermediate_ffn.intermediate_dense"),
("encoder.proj2", "intermediate_ffn.output_dense"),
("encoder.layer_norm", "inner_layer_norm"),
("masker.temporal_mask_embed", "masked_spec_embed"),
]
keys_to_remove = {
"quantizer.entry_proj",
"final_proj",
"final_target_proj",
"quantizer.entries",
"quantizer.num_updates",
}
def param_count(model):
return sum(p[1].numel() for p in model.named_parameters() if "final_proj" not in p[0])
def _convert_model(
original_model,
hf_model,
convert_list,
):
state_dict = original_model.state_dict()
for k, v in list(state_dict.items()):
new_key = k
for old_layer_name, new_layer_name in convert_list:
if old_layer_name in new_key:
new_key = new_key.replace(old_layer_name, new_layer_name)
# must do it by hand
if ".layer_norm" in new_key and new_key.split(".layer_norm")[0][-1].isnumeric():
new_key = new_key.replace("layer_norm", "final_layer_norm")
add_key = True
for key in keys_to_remove:
if key in new_key:
state_dict.pop(k)
add_key = False
break
if add_key:
state_dict[new_key] = state_dict.pop(k)
extra_keys = set(state_dict.keys()) - set(hf_model.state_dict().keys())
    extra_keys = {k for k in extra_keys if "num_updates" not in k}  # filter out unnecessary parameters
missing_keys = set(hf_model.state_dict().keys()) - set(state_dict.keys())
if len(extra_keys) != 0:
raise ValueError(f"extra keys found: {extra_keys}")
if len(missing_keys) != 0:
raise ValueError(f"missing keys: {missing_keys}")
hf_model.load_state_dict(state_dict, strict=True)
n_params = param_count(hf_model)
logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params")
hf_model.eval()
del state_dict
return hf_model
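# Illustrative example of the substring renaming above (hypothetical fairseq2 parameter name):
#   "encoder.inner.layers.3.conv.depthwise_conv.weight"
#   -> "encoder.layers.3.conv_module.depthwise_conv.weight"
# via the ("encoder.inner.layers", "encoder.layers") and ("conv.depthwise_conv",
# "conv_module.depthwise_conv") entries of wav2vec_convert_list.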
@torch.no_grad()
def convert_wav2vec2_bert_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
config_path=None,
repo_id=None,
):
"""
Copy/paste/tweak model's weights to transformers design.
"""
if config_path is not None:
config = Wav2Vec2BertConfig.from_pretrained(config_path, hidden_act="swish")
else:
config = Wav2Vec2BertConfig(apply_spec_augment=False)
hf_wav2vec = Wav2Vec2BertModel(config)
model = load_conformer_shaw_model(checkpoint_path, dtype=torch.float32)
model.eval()
hf_wav2vec = _convert_model(model, hf_wav2vec, wav2vec_convert_list)
hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if repo_id:
hf_wav2vec.push_to_hub(repo_id, create_pr=True)
# save feature extractor
fe = SeamlessM4TFeatureExtractor(padding_value=1)
fe._set_processor_class("Wav2Vec2BertProcessor")
fe.save_pretrained(pytorch_dump_folder_path)
if repo_id:
fe.push_to_hub(repo_id, create_pr=True)
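    # NOTE: `args` below refers to the module-level namespace parsed under `if __name__ == "__main__"`;
    # the equivalence check only runs when `--audio_path` is passed on the command line.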
if args.audio_path:
waveform, sample_rate = torchaudio.load(args.audio_path)
waveform = torchaudio.functional.resample(waveform, sample_rate, fe.sampling_rate)
fbank_converter = WaveformToFbankConverter(
num_mel_bins=80,
waveform_scale=2**15,
channel_last=True,
standardize=True,
dtype=torch.float32,
)
collater = Collater(pad_value=1)
decoded_audio = {"waveform": waveform.T, "sample_rate": fe.sampling_rate, "format": -1}
src = collater(fbank_converter(decoded_audio))["fbank"]
seqs, padding_mask = get_seqs_and_padding_mask(src)
with torch.inference_mode():
seqs, padding_mask = model.encoder_frontend(seqs, padding_mask)
original_output, padding_mask = model.encoder(seqs, padding_mask)
hf_wav2vec.eval()
inputs = fe(waveform, return_tensors="pt", padding=True)
with torch.no_grad():
outputs = hf_wav2vec(**inputs)
torch.testing.assert_close(original_output, outputs.last_hidden_state, rtol=5e-3, atol=5e-3)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the output PyTorch model.",
)
parser.add_argument(
"--checkpoint_path", default="conformer_shaw", type=str, help="Path to seamless communication checkpoint"
)
parser.add_argument(
"--config_path",
default=None,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--repo_id", default=None, type=str, help="Push to this repo id if precised.")
parser.add_argument(
"--audio_path",
default=None,
type=str,
help="If specified, check that the original model and the converted model produce the same outputs.",
)
args = parser.parse_args()
convert_wav2vec2_bert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.repo_id
)
| transformers/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py/0 | {
"file_path": "transformers/src/transformers/models/wav2vec2_bert/convert_wav2vec2_seamless_checkpoint.py",
"repo_id": "transformers",
"token_count": 3153
} | 519 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""X-CLIP model configuration"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class XCLIPTextConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`XCLIPModel`]. It is used to instantiate an X-CLIP
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the X-CLIP
[microsoft/xclip-base-patch32](https://huggingface.co/microsoft/xclip-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 49408):
Vocabulary size of the X-CLIP text model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed when calling [`XCLIPModel`].
hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Transformer encoder.
max_position_embeddings (`int`, *optional*, defaults to 77):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
Example:
```python
>>> from transformers import XCLIPTextModel, XCLIPTextConfig
>>> # Initializing a XCLIPTextModel with microsoft/xclip-base-patch32 style configuration
>>> configuration = XCLIPTextConfig()
>>> # Initializing a XCLIPTextConfig from the microsoft/xclip-base-patch32 style configuration
>>> model = XCLIPTextModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "xclip_text_model"
base_config_key = "text_config"
def __init__(
self,
vocab_size=49408,
hidden_size=512,
intermediate_size=2048,
num_hidden_layers=12,
num_attention_heads=8,
max_position_embeddings=77,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
**kwargs,
):
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.max_position_embeddings = max_position_embeddings
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
class XCLIPVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`XCLIPModel`]. It is used to instantiate an X-CLIP
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the X-CLIP
[microsoft/xclip-base-patch32](https://huggingface.co/microsoft/xclip-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
mit_hidden_size (`int`, *optional*, defaults to 512):
Dimensionality of the encoder layers of the Multiframe Integration Transformer (MIT).
mit_intermediate_size (`int`, *optional*, defaults to 2048):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Multiframe Integration Transformer
(MIT).
mit_num_hidden_layers (`int`, *optional*, defaults to 1):
Number of hidden layers in the Multiframe Integration Transformer (MIT).
mit_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads for each attention layer in the Multiframe Integration Transformer (MIT).
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 32):
The size (resolution) of each patch.
hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"`, `"gelu_new"` and `"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1):
A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
testing).
drop_path_rate (`float`, *optional*, defaults to 0.0):
Stochastic depth rate.
Example:
```python
>>> from transformers import XCLIPVisionModel, XCLIPVisionConfig
>>> # Initializing a XCLIPVisionModel with microsoft/xclip-base-patch32 style configuration
>>> configuration = XCLIPVisionConfig()
>>> # Initializing a XCLIPVisionModel model from the microsoft/xclip-base-patch32 style configuration
>>> model = XCLIPVisionModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "xclip_vision_model"
base_config_key = "vision_config"
def __init__(
self,
hidden_size=768,
intermediate_size=3072,
num_hidden_layers=12,
num_attention_heads=12,
mit_hidden_size=512,
mit_intermediate_size=2048,
mit_num_hidden_layers=1,
mit_num_attention_heads=8,
num_channels=3,
image_size=224,
patch_size=32,
num_frames=8,
hidden_act="quick_gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
drop_path_rate=0.0,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.mit_hidden_size = mit_hidden_size
self.mit_intermediate_size = mit_intermediate_size
self.mit_num_hidden_layers = mit_num_hidden_layers
self.mit_num_attention_heads = mit_num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.num_frames = num_frames
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
self.drop_path_rate = drop_path_rate
class XCLIPConfig(PretrainedConfig):
r"""
[`XCLIPConfig`] is the configuration class to store the configuration of a [`XCLIPModel`]. It is used to
instantiate X-CLIP model according to the specified arguments, defining the text model and vision model configs.
Instantiating a configuration with the defaults will yield a similar configuration to that of the X-CLIP
[microsoft/xclip-base-patch32](https://huggingface.co/microsoft/xclip-base-patch32) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
text_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`XCLIPTextConfig`].
vision_config (`dict`, *optional*):
Dictionary of configuration options used to initialize [`XCLIPVisionConfig`].
projection_dim (`int`, *optional*, defaults to 512):
Dimensionality of text and vision projection layers.
prompt_layers (`int`, *optional*, defaults to 2):
Number of layers in the video specific prompt generator.
prompt_alpha (`float`, *optional*, defaults to 0.1):
Alpha value to use in the video specific prompt generator.
prompt_hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
The non-linear activation function (function or string) in the video specific prompt generator. If string,
`"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
prompt_num_attention_heads (`int`, *optional*, defaults to 8):
Number of attention heads in the cross-attention of the video specific prompt generator.
prompt_attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the attention layers in the video specific prompt generator.
prompt_projection_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for the projection layers in the video specific prompt generator.
logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
The initial value of the *logit_scale* parameter. Default is used as per the original XCLIP implementation.
kwargs (*optional*):
Dictionary of keyword arguments.
"""
model_type = "xclip"
sub_configs = {"text_config": XCLIPTextConfig, "vision_config": XCLIPVisionConfig}
def __init__(
self,
text_config=None,
vision_config=None,
projection_dim=512,
prompt_layers=2,
prompt_alpha=0.1,
prompt_hidden_act="quick_gelu",
prompt_num_attention_heads=8,
prompt_attention_dropout=0.0,
prompt_projection_dropout=0.0,
logit_scale_init_value=2.6592,
**kwargs,
):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
text_config_dict = kwargs.pop("text_config_dict", None)
vision_config_dict = kwargs.pop("vision_config_dict", None)
super().__init__(**kwargs)
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
text_config = {}
# This is the complete result when using `text_config_dict`.
_text_config_dict = XCLIPTextConfig(**text_config_dict).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
f'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`text_config_dict` is provided which will be used to initialize `XCLIPTextConfig`. The "
f'value `text_config["{key}"]` will be overridden.'
)
logger.info(message)
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict)
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
# This is the complete result when using `vision_config_dict`.
_vision_config_dict = XCLIPVisionConfig(**vision_config_dict).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_vision_config_dict["id2label"] = {
str(key): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
f'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
message = (
f"`vision_config_dict` is provided which will be used to initialize `XCLIPVisionConfig`. "
f'The value `vision_config["{key}"]` will be overridden.'
)
logger.info(message)
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict)
if text_config is None:
text_config = {}
logger.info("`text_config` is `None`. Initializing the `XCLIPTextConfig` with default values.")
if vision_config is None:
vision_config = {}
logger.info("`vision_config` is `None`. initializing the `XCLIPVisionConfig` with default values.")
self.text_config = XCLIPTextConfig(**text_config)
self.vision_config = XCLIPVisionConfig(**vision_config)
self.projection_dim = projection_dim
self.prompt_layers = prompt_layers
self.prompt_alpha = prompt_alpha
self.prompt_hidden_act = prompt_hidden_act
self.prompt_num_attention_heads = prompt_num_attention_heads
self.prompt_attention_dropout = prompt_attention_dropout
self.prompt_projection_dropout = prompt_projection_dropout
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
__all__ = ["XCLIPConfig", "XCLIPTextConfig", "XCLIPVisionConfig"]
| transformers/src/transformers/models/x_clip/configuration_x_clip.py/0 | {
"file_path": "transformers/src/transformers/models/x_clip/configuration_x_clip.py",
"repo_id": "transformers",
"token_count": 7194
} | 520 |
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOS model configuration"""
from collections import OrderedDict
from collections.abc import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class YolosConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`YolosModel`]. It is used to instantiate a YOLOS
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the YOLOS
[hustvl/yolos-base](https://huggingface.co/hustvl/yolos-base) architecture.
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-12):
The epsilon used by the layer normalization layers.
image_size (`list[int]`, *optional*, defaults to `[512, 864]`):
The size (resolution) of each image.
patch_size (`int`, *optional*, defaults to 16):
The size (resolution) of each patch.
num_channels (`int`, *optional*, defaults to 3):
The number of input channels.
qkv_bias (`bool`, *optional*, defaults to `True`):
Whether to add a bias to the queries, keys and values.
num_detection_tokens (`int`, *optional*, defaults to 100):
The number of detection tokens.
use_mid_position_embeddings (`bool`, *optional*, defaults to `True`):
Whether to use the mid-layer position encodings.
auxiliary_loss (`bool`, *optional*, defaults to `False`):
Whether auxiliary decoding losses (loss at each decoder layer) are to be used.
class_cost (`float`, *optional*, defaults to 1):
Relative weight of the classification error in the Hungarian matching cost.
bbox_cost (`float`, *optional*, defaults to 5):
Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost.
giou_cost (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost.
bbox_loss_coefficient (`float`, *optional*, defaults to 5):
Relative weight of the L1 bounding box loss in the object detection loss.
giou_loss_coefficient (`float`, *optional*, defaults to 2):
Relative weight of the generalized IoU loss in the object detection loss.
eos_coefficient (`float`, *optional*, defaults to 0.1):
Relative classification weight of the 'no-object' class in the object detection loss.
Example:
```python
>>> from transformers import YolosConfig, YolosModel
>>> # Initializing a YOLOS hustvl/yolos-base style configuration
>>> configuration = YolosConfig()
>>> # Initializing a model (with random weights) from the hustvl/yolos-base style configuration
>>> model = YolosModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "yolos"
def __init__(
self,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
layer_norm_eps=1e-12,
image_size=[512, 864],
patch_size=16,
num_channels=3,
qkv_bias=True,
num_detection_tokens=100,
use_mid_position_embeddings=True,
auxiliary_loss=False,
class_cost=1,
bbox_cost=5,
giou_cost=2,
bbox_loss_coefficient=5,
giou_loss_coefficient=2,
eos_coefficient=0.1,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.qkv_bias = qkv_bias
self.num_detection_tokens = num_detection_tokens
self.use_mid_position_embeddings = use_mid_position_embeddings
self.auxiliary_loss = auxiliary_loss
# Hungarian matcher
self.class_cost = class_cost
self.bbox_cost = bbox_cost
self.giou_cost = giou_cost
# Loss coefficients
self.bbox_loss_coefficient = bbox_loss_coefficient
self.giou_loss_coefficient = giou_loss_coefficient
self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
torch_onnx_minimum_version = version.parse("1.11")
@property
def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
]
)
@property
def atol_for_validation(self) -> float:
return 1e-4
@property
def default_onnx_opset(self) -> int:
return 12
__all__ = ["YolosConfig", "YolosOnnxConfig"]
| transformers/src/transformers/models/yolos/configuration_yolos.py/0 | {
"file_path": "transformers/src/transformers/models/yolos/configuration_yolos.py",
"repo_id": "transformers",
"token_count": 2942
} | 521 |
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/zamba2/modular_zamba2.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_zamba2.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# coding=utf-8
# Copyright 2024 Zyphra Technologies and the HuggingFace Inc. team. All rights reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import re
from itertools import cycle
from typing import Any, Callable, Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...cache_utils import Cache
from ...generation import GenerationMixin
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import auto_docstring, logging
from ...utils.deprecation import deprecate_kwarg
from ...utils.import_utils import is_causal_conv1d_available, is_mamba_ssm_available
from .configuration_zamba2 import Zamba2Config
if is_mamba_ssm_available():
from mamba_ssm.ops.triton.selective_state_update import selective_state_update
from mamba_ssm.ops.triton.ssd_combined import mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined
else:
selective_state_update, mamba_chunk_scan_combined, mamba_split_conv1d_scan_combined = None, None, None
if is_causal_conv1d_available():
from causal_conv1d import causal_conv1d_fn, causal_conv1d_update
else:
causal_conv1d_update, causal_conv1d_fn = None, None
logger = logging.get_logger(__name__)
class Zamba2RMSNormGated(torch.nn.Module):
def __init__(self, hidden_size, group_size, eps=1e-6):
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
self.group_size = group_size
def forward(self, hidden_states, gate=None):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
if gate is not None:
hidden_states = hidden_states * nn.functional.silu(gate.to(torch.float32))
*prefix_dims, last_dim = hidden_states.shape
group_count = last_dim // self.group_size
hidden_states_group = hidden_states.view(*prefix_dims, group_count, self.group_size)
variance = hidden_states_group.pow(2).mean(-1, keepdim=True)
hidden_states_group = hidden_states_group * torch.rsqrt(variance + self.variance_epsilon)
hidden_states = hidden_states_group.view(*prefix_dims, group_count * self.group_size)
return self.weight * hidden_states.to(input_dtype)
class Zamba2RMSNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Zamba2RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
class Zamba2HybridDynamicCache:
"""
A dynamic cache that can handle both the attention cache (which has a seq_len dimension) and the mamba cache
(which has a constant shape regardless of seq_len).
This cache has two sets of lists of tensors: `key_cache` and `value_cache` for attention cache and `conv_states`
    and `ssm_states` for mamba cache. Each of these lists has `num_layers` tensors. The expected shapes are as follows.
For attention layers, `key_cache` and `value_cache` have a shape of `(batch_size, num_heads, seq_len, head_dim)`,
while `conv_states` and `ssm_states` have a shape of `(batch_size, 0)` (empty tensors).
For mamba layers, `key_cache` and `value_cache` have a shape of `(batch_size, 0)` (empty tensors),
while `conv_states` represents the convolution state and has a shape of `(batch_size, d_inner, d_conv)`,
and `ssm_states` represents the ssm state and has a shape of `(batch_size, d_inner, d_state)`.
"""
is_compileable = False
def __init__(
self, config: Zamba2Config, batch_size: int, dtype: torch.dtype = torch.float16, device: Optional[str] = None
):
self.dtype = dtype
self.layers_block_type = config.layers_block_type
self.has_previous_state = False
self.intermediate_size = int(config.mamba_expand * config.hidden_size)
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.n_mamba_heads = config.n_mamba_heads
self.transformer_layers = []
self._modules = {}
self._parameters = {}
self._buffers = {}
self.conv_states = {}
self.ssm_states = {}
for i in range(config.num_hidden_layers):
self.conv_states[i] = torch.zeros(
batch_size,
self.intermediate_size + 2 * config.mamba_ngroups * config.mamba_d_state,
self.conv_kernel_size,
device=device,
dtype=dtype,
)
self.ssm_states[i] = torch.zeros(
batch_size, self.n_mamba_heads, config.mamba_headdim, self.ssm_state_size, device=device, dtype=dtype
)
if self.layers_block_type[i] == "hybrid":
self.transformer_layers.append(i)
self.key_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
self.value_cache = [torch.tensor([[]] * batch_size, device=device) for _ in range(config.num_hidden_layers)]
def __len__(self):
return len(self.key_cache)
def __getitem__(self, layer_idx: int) -> tuple[torch.Tensor, torch.Tensor]:
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def update(
self,
key_states: torch.Tensor,
value_states: torch.Tensor,
layer_idx: int,
cache_kwargs: Optional[dict[str, Any]] = None,
) -> tuple[torch.Tensor, torch.Tensor]:
# Update the cache
if self.key_cache[layer_idx].shape[-1] == 0:
self.key_cache[layer_idx] = key_states
self.value_cache[layer_idx] = value_states
else:
self.key_cache[layer_idx] = torch.cat([self.key_cache[layer_idx], key_states], dim=2)
self.value_cache[layer_idx] = torch.cat([self.value_cache[layer_idx], value_states], dim=2)
return self.key_cache[layer_idx], self.value_cache[layer_idx]
def reorder_cache(self, beam_idx: torch.LongTensor):
"""Reorders the cache for beam search, given the selected beam indices."""
for layer_idx in range(len(self.key_cache)):
device = self.key_cache[layer_idx].device
self.key_cache[layer_idx] = self.key_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.value_cache[layer_idx].device
self.value_cache[layer_idx] = self.value_cache[layer_idx].index_select(0, beam_idx.to(device))
device = self.conv_states[layer_idx].device
self.conv_states[layer_idx] = self.conv_states[layer_idx].index_select(0, beam_idx.to(device))
device = self.ssm_states[layer_idx].device
self.ssm_states[layer_idx] = self.ssm_states[layer_idx].index_select(0, beam_idx.to(device))
def get_seq_length(self, layer_idx: Optional[int] = 0) -> int:
"""Returns the sequence length of the cached states. A layer index can be optionally passed."""
# take any layer that contains cache and not empty tensor
layer_idx = self.transformer_layers[0] if layer_idx not in self.transformer_layers else layer_idx
if len(self.key_cache) <= layer_idx or self.key_cache[layer_idx].numel() == 0:
return 0
return self.key_cache[layer_idx].shape[-2]
def update_conv_state(
self, layer_idx: int, new_conv_state: torch.Tensor, cache_position: torch.LongTensor
) -> torch.Tensor:
conv_state = self.conv_states[layer_idx]
cache_position = cache_position.clamp(0, self.conv_kernel_size - 1)
conv_state = conv_state.roll(shifts=-1, dims=-1)
conv_state[:, :, cache_position] = new_conv_state.to(conv_state.device)
self.conv_states[layer_idx].zero_()
self.conv_states[layer_idx] += conv_state
return self.conv_states[layer_idx]
    def reset(self):
        for layer_idx in self.conv_states:
            self.conv_states[layer_idx].zero_()
            self.ssm_states[layer_idx].zero_()
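# Usage sketch (illustrative values): the hybrid cache is allocated once per generation call, e.g.
#   cache = Zamba2HybridDynamicCache(config, batch_size=2, dtype=torch.bfloat16, device="cuda")
# and passed as `past_key_values`; attention layers grow `key_cache`/`value_cache` via `update`,
# while mamba layers overwrite their fixed-size `conv_states`/`ssm_states` in place.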
class Zamba2RotaryEmbedding(nn.Module):
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, config: Zamba2Config, device=None):
super().__init__()
# BC: "rope_type" was originally "type"
if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
else:
self.rope_type = "default"
self.max_seq_len_cached = config.max_position_embeddings
self.original_max_seq_len = config.max_position_embeddings
self.config = config
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
self.register_buffer("inv_freq", inv_freq, persistent=False)
self.original_inv_freq = self.inv_freq
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
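# Example: with n_rep=4, a key/value tensor of shape (2, 8, 128, 64) becomes (2, 32, 128, 64),
# i.e. each of the 8 KV heads is repeated 4 times along the head dimension.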
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
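# Example: for a last dimension holding [x1, x2, x3, x4], rotate_half returns [-x3, -x4, x1, x2]
# (the second half is negated and moved in front of the first half).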
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed, k_embed
class Zamba2Attention(nn.Module):
"""
Multi-headed attention from 'Attention Is All You Need' paper.
Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
(see fig. 2 in https://huggingface.co/papers/2405.16712).
Additionally, replaced
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
    Finally, this attention layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this
    layer is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the q, k, v projectors to increase
    expressivity with a small memory overhead (see Fig. 2 of https://huggingface.co/papers/2411.15242).
"""
def __init__(
self,
config: Zamba2Config,
layer_idx: Optional[int] = None,
num_fwd_mem_blocks: Optional[int] = None,
block_id: Optional[int] = None,
):
super().__init__()
self.config = config
self.layer_idx = layer_idx
self.attention_hidden_size = config.attention_hidden_size
self.head_dim = config.attention_head_dim
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.scaling = (self.head_dim / 2) ** -0.5
self.is_causal = True
self.attention_dropout = config.attention_dropout
self.q_proj = nn.Linear(config.attention_hidden_size, config.num_attention_heads * self.head_dim, bias=False)
self.k_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.v_proj = nn.Linear(config.attention_hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.layer_block_map = config.hybrid_layer_ids
self.block_id = block_id
if config.use_shared_attention_adapter:
self.linear_q_adapter_list = nn.ModuleList([])
self.linear_k_adapter_list = nn.ModuleList([])
self.linear_v_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
linear_q_adapter = nn.Sequential(
nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
)
linear_k_adapter = nn.Sequential(
nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
)
linear_v_adapter = nn.Sequential(
nn.Linear(self.attention_hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, self.attention_hidden_size, bias=False),
)
else:
linear_q_adapter = nn.Identity()
linear_k_adapter = nn.Identity()
linear_v_adapter = nn.Identity()
self.linear_q_adapter_list.append(linear_q_adapter)
self.linear_k_adapter_list.append(linear_k_adapter)
self.linear_v_adapter_list.append(linear_v_adapter)
self.layer_dic = {value: index for index, value in enumerate(self.layer_block_map)}
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
layer_idx: int,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.head_dim)
query_states = self.q_proj(hidden_states)
key_states = self.k_proj(hidden_states)
value_states = self.v_proj(hidden_states)
if self.config.use_shared_attention_adapter:
adapter_layer_idx = self.layer_dic[layer_idx]
query_states = query_states + self.linear_q_adapter_list[adapter_layer_idx](hidden_states)
key_states = key_states + self.linear_k_adapter_list[adapter_layer_idx](hidden_states)
value_states = value_states + self.linear_v_adapter_list[adapter_layer_idx](hidden_states)
query_states = query_states.view(hidden_shape).transpose(1, 2)
key_states = key_states.view(hidden_shape).transpose(1, 2)
value_states = value_states.view(hidden_shape).transpose(1, 2)
if self.config.use_mem_rope:
cos, sin = position_embeddings
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
if past_key_values is not None:
key_states, value_states = past_key_values.update(key_states, value_states, layer_idx)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
**kwargs,
)
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
# Helper methods for segment sum computation
def pad_tensor_by_size(input_tensor: torch.Tensor, pad_size: int):
"""
    Pads the input tensor with `pad_size` zeros along the seq_len dimension (dim=1).
    Assumes the input tensor has rank 4 or 3.
"""
pad_shape = (0, 0, 0, 0, 0, pad_size, 0, 0) if len(input_tensor.shape) == 4 else (0, 0, 0, pad_size, 0, 0)
return torch.nn.functional.pad(input_tensor, pad_shape, mode="constant", value=0)
def reshape_into_chunks(input_tensor, pad_size, chunk_size):
"""
    Pads input_tensor with `pad_size` on the seq_len dimension (dim=1) and
    simultaneously splits it into chunk sequences.
    Assumes the input tensor has rank 4 or 3.
"""
# [bsz, seq_len, ...] -> [bsz, seq_len multiple of chunk_size, ...]
input_tensor = pad_tensor_by_size(input_tensor, pad_size)
if len(input_tensor.shape) == 3:
# [bsz, seq_len multiple of chunk_size, num_heads] -> [bsz, -1, chunk_size, num_heads]
return input_tensor.reshape(input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2])
else:
# [bsz, seq_len multiple of chunk_size, num_heads, head_dim or state_size] -> [bsz, -1, chunk_size, num_heads, head_dim or state_size]
return input_tensor.reshape(
input_tensor.shape[0], -1, chunk_size, input_tensor.shape[2], input_tensor.shape[3]
)
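
# Illustrative sketch with hypothetical sizes: a (batch=2, seq_len=10, num_heads=4) tensor is
# padded up to a multiple of chunk_size=4 and reshaped into (batch, num_chunks, chunk_size, num_heads).
def _chunking_example():
    x = torch.randn(2, 10, 4)
    chunk_size = 4
    pad_size = (chunk_size - x.shape[1] % chunk_size) % chunk_size  # 2 here
    chunks = reshape_into_chunks(x, pad_size, chunk_size)
    assert chunks.shape == (2, 3, 4, 4)
    # the padded tail of the last chunk is zero-filled
    assert torch.all(chunks[:, -1, -pad_size:, :] == 0)
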
def segment_sum(input_tensor):
"""
More stable segment sum calculation. Uses cumulative sums and masking instead of direct subtractions.
"""
chunk_size = input_tensor.size(-1)
# 1. expand input tensor to have an additional dimension and repeat along that dimension
# [..., chunk_size] -> [..., chunk_size, chunk_size]
input_tensor = input_tensor[..., None].expand(*input_tensor.size(), chunk_size)
    # 2. create a strictly lower triangular mask (diagonal excluded) to zero out elements on and above the diagonal
mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=-1)
input_tensor = input_tensor.masked_fill(~mask, 0)
# 3. compute actual cumsum
tensor_segsum = torch.cumsum(input_tensor, dim=-2)
# 4. apply mask to keep only the lower triangular part of the cumulative sum result (incl diagonal this time)
mask = torch.tril(torch.ones(chunk_size, chunk_size, device=input_tensor.device, dtype=torch.bool), diagonal=0)
tensor_segsum = tensor_segsum.masked_fill(~mask, -torch.inf)
return tensor_segsum
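
# Illustrative sketch: for a single hypothetical chunk, segment_sum(x)[i, j] equals the
# difference of inclusive cumulative sums cumsum(x)[i] - cumsum(x)[j] for j <= i, while
# entries above the diagonal are masked to -inf (so they vanish after exponentiation).
def _segment_sum_example():
    x = torch.randn(4)
    out = segment_sum(x)
    csum = torch.cumsum(x, dim=-1)
    for i in range(4):
        for j in range(i + 1):
            assert torch.allclose(out[i, j], csum[i] - csum[j], atol=1e-6)
    assert torch.isinf(out[0, 1]) and out[0, 1] < 0
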
is_fast_path_available = all((selective_state_update, causal_conv1d_fn, causal_conv1d_update))
class Zamba2MambaMixer(nn.Module):
"""
Compute ∆, A, B, C, and D the state space parameters and compute the `contextualized_states`.
A, D are input independent (see Mamba paper [1] Section 3.5.2 "Interpretation of A" for why A isn't selective)
∆, B, C are input-dependent (this is a key difference between Mamba and the linear time invariant S4,
and is why Mamba is called **selective** state spaces)
"""
def __init__(self, config: Zamba2Config, layer_idx: Optional[int] = None):
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.ssm_state_size = config.mamba_d_state
self.conv_kernel_size = config.mamba_d_conv
self.intermediate_size = int(config.mamba_expand * self.hidden_size)
self.layer_idx = layer_idx
self.use_conv_bias = config.use_conv_bias
self.activation = "silu"
self.act = nn.SiLU()
self.use_mem_eff_path = config.use_mem_eff_path
self.n_groups = config.mamba_ngroups
self.head_dim = config.mamba_headdim
self.num_heads = self.config.n_mamba_heads
self.chunk_size = config.chunk_size
self.time_step_limit = config.time_step_limit
self.time_step_min = config.time_step_min
self.time_step_max = config.time_step_max
self.conv_dim = self.intermediate_size + 2 * self.n_groups * self.ssm_state_size
self.conv1d = nn.Conv1d(
in_channels=self.conv_dim,
out_channels=self.conv_dim,
bias=True,
kernel_size=config.mamba_d_conv,
groups=self.conv_dim,
padding=config.mamba_d_conv - 1,
)
# projection of the input hidden states
projection_size = self.intermediate_size + self.conv_dim + self.num_heads
self.in_proj = nn.Linear(
self.hidden_size,
projection_size,
bias=config.add_bias_linear,
)
# selective projection used to make dt, B and C input dependent
# time step projection (discretization)
# instantiate once and copy inv_dt in init_weights of PretrainedModel
self.dt_bias = nn.Parameter(torch.ones(self.num_heads))
# S4D real initialization. These are not discretized!
# The core is to load them, compute the discrete states, then write the updated state. Keeps the memory bounded
A = torch.arange(1, self.num_heads + 1)
self.A_log = nn.Parameter(torch.log(A))
self.A_log._no_weight_decay = True
self.norm = Zamba2RMSNormGated(
self.intermediate_size, group_size=self.intermediate_size // self.n_groups, eps=1e-5
)
self.D = nn.Parameter(torch.ones(self.num_heads))
self.D._no_weight_decay = True
self.out_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
if not is_fast_path_available:
logger.warning_once(
"The fast path is not available because on of `(selective_state_update, causal_conv1d_fn, causal_conv1d_update)`"
" is None. Falling back to the naive implementation. To install follow https://github.com/state-spaces/mamba/#installation and"
" https://github.com/Dao-AILab/causal-conv1d"
)
def cuda_kernels_forward(
self,
hidden_states: torch.Tensor,
cache_params: Optional[Zamba2HybridDynamicCache] = None,
attention_mask: Optional[torch.Tensor] = None,
):
# set up dimensions for reshapes later
batch_size, seq_len, _ = hidden_states.shape
groups_time_state_size = self.n_groups * self.ssm_state_size
d_to_remove = 2 * self.intermediate_size + 2 * self.n_groups * self.ssm_state_size + self.num_heads
# getting projected states from cache if it exists
if cache_params is not None and cache_params.has_previous_state:
in_projected_states = self.in_proj(hidden_states.squeeze(1)) # (B 2D)
d_mlp = (in_projected_states.shape[-1] - d_to_remove) // 2
split_projection_dim = [d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads]
_, _, gate, hidden_states_B_C, dt = torch.split(in_projected_states, split_projection_dim, dim=-1)
hidden_states_B_C = causal_conv1d_update(
hidden_states_B_C,
cache_params.conv_states[self.layer_idx],
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.activation,
)
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
A = -torch.exp(self.A_log.float()) # (nheads,)
A = A[:, None, ...][:, :, None].expand(-1, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
dt = dt[:, :, None].expand(-1, -1, self.head_dim)
dt_bias = self.dt_bias[:, None, ...].expand(-1, self.head_dim)
D = self.D[:, None, ...].expand(-1, self.head_dim)
B = B.view(batch_size, self.n_groups, B.shape[1] // self.n_groups)
C = C.view(batch_size, self.n_groups, C.shape[1] // self.n_groups)
hidden_states_reshaped = hidden_states.view(batch_size, self.num_heads, self.head_dim)
hidden_states = selective_state_update(
cache_params.ssm_states[self.layer_idx],
hidden_states_reshaped,
dt,
A,
B,
C,
D,
z=None,
dt_bias=dt_bias,
dt_softplus=True,
)
hidden_states = hidden_states.view(batch_size, self.num_heads * self.head_dim)
hidden_states = self.norm(hidden_states, gate)
out = self.out_proj(hidden_states)[:, None, ...]
        # if no cache is found, call the kernel on the full sequence
else:
if attention_mask is not None and not torch.all(attention_mask == 1):
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
# 1. Gated MLP's linear projection
projected_states = self.in_proj(hidden_states)
A = -torch.exp(self.A_log.float()) # (num_heads) or (intermediate_size, state_size)
dt_limit_kwargs = {} if self.time_step_limit is None else {"dt_limit": self.time_step_limit}
if attention_mask is not None:
input_not_masked = torch.all(attention_mask == 1)
else:
input_not_masked = True
if self.use_mem_eff_path and self.training and cache_params is None and input_not_masked:
out, ssm_state = mamba_split_conv1d_scan_combined(
projected_states,
self.conv1d.weight.squeeze(1),
self.conv1d.bias,
self.dt_bias,
A,
D=self.D,
chunk_size=self.chunk_size,
seq_idx=None,
activation=self.activation,
rmsnorm_weight=self.norm.weight,
rmsnorm_eps=self.norm.variance_epsilon,
outproj_weight=self.out_proj.weight,
outproj_bias=self.out_proj.bias,
headdim=self.head_dim,
ngroups=self.n_groups,
norm_before_gate=False,
return_final_states=True,
**dt_limit_kwargs,
)
else:
gate, hidden_states_B_C, time_step = torch.split(
projected_states,
[self.intermediate_size, self.conv_dim, self.num_heads],
dim=-1,
)
# 1D Convolution
if cache_params is not None:
hidden_states_B_C_t = hidden_states_B_C.transpose(1, 2)
conv_state = nn.functional.pad(
hidden_states_B_C_t, (self.conv_kernel_size - hidden_states_B_C_t.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx].copy_(conv_state)
if causal_conv1d_fn is None or self.activation not in ["silu", "swish"]:
hidden_states_B_C = self.act(
self.conv1d(hidden_states_B_C.transpose(1, 2)).transpose(1, 2)[:, :seq_len]
) # (B, L, self.d_inner + 2 * ngroups * d_state)
else:
hidden_states_B_C = causal_conv1d_fn(
x=hidden_states_B_C.transpose(1, 2),
weight=self.conv1d.weight.squeeze(1),
bias=self.conv1d.bias,
activation=self.activation,
).transpose(1, 2)[:, :seq_len]
hidden_states, B, C = torch.split(
hidden_states_B_C,
[self.intermediate_size, groups_time_state_size, groups_time_state_size],
dim=-1,
)
if attention_mask is not None and not torch.all(attention_mask == 1):
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
dtype = hidden_states.dtype
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
scan_output, ssm_state = mamba_chunk_scan_combined(
hidden_states.view(batch_size, seq_len, -1, self.head_dim),
time_step,
A,
B.view(batch_size, seq_len, self.n_groups, -1),
C.view(batch_size, seq_len, self.n_groups, -1),
chunk_size=self.chunk_size,
D=self.D,
z=None,
seq_idx=None,
return_final_states=True,
dt_bias=self.dt_bias,
dt_softplus=True,
**dt_limit_kwargs,
)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = scan_output.view(batch_size, seq_len, -1)
# Multiply "gate" branch and apply extra normalization layer
scan_output = self.norm(scan_output, gate)
out = self.out_proj(scan_output)
return out
# fmt: off
def torch_forward(self, input_states, cache_params: Optional[Zamba2HybridDynamicCache]=None, attention_mask: Optional[torch.Tensor]=None):
batch_size, seq_len, _ = input_states.shape
dtype = input_states.dtype
# Gated MLP's linear projection
if cache_params is not None and cache_params.has_previous_state:
projected_states = self.in_proj(input_states.squeeze(1))
else:
if attention_mask is not None and not torch.all(attention_mask==1):
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
input_states = (input_states * attention_mask[:, :, None]).to(dtype)
projected_states = self.in_proj(input_states)
d_mlp = (projected_states.shape[-1] - 2 * self.intermediate_size - 2 * self.n_groups * self.ssm_state_size- self.num_heads) // 2
_, _, gate, hidden_states, dt = projected_states.split(
[d_mlp, d_mlp, self.intermediate_size, self.conv_dim, self.num_heads], dim=-1
)
# Convolution sequence transformation
if cache_params is not None:
ssm_state = cache_params.ssm_states[self.layer_idx].clone()
ssm_state = ssm_state.to(hidden_states.device)
if cache_params.has_previous_state:
gate = gate.unsqueeze(1)
conv_state = cache_params.conv_states[self.layer_idx] # [batch, intermediate_size, conv_kernel_size]
conv_state = torch.roll(conv_state, shifts=-1, dims=-1)
# handle batched generation - states are copied through
conv_state[:, :, -1] = hidden_states[:, 0, :] if hidden_states.ndim == 3 else hidden_states
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = torch.sum(conv_state.to(projected_states.device) * self.conv1d.weight[:, 0, :], dim=-1)
if self.use_conv_bias:
hidden_states += self.conv1d.bias
hidden_states = self.act(hidden_states).to(dtype)[:, None, ...] # [batch, 1, intermediate_size] : decoding
else:
hidden_states = hidden_states.transpose(1,2)
conv_state = nn.functional.pad(
hidden_states,
(self.conv_kernel_size - hidden_states.shape[-1], 0)
)
cache_params.conv_states[self.layer_idx].copy_(conv_state)
hidden_states = self.act(self.conv1d(hidden_states).transpose(1,2))[:, :seq_len, :] # [batch, intermediate_size, seq_len]
if attention_mask is not None and not torch.all(attention_mask==1):
dtype = hidden_states.dtype
# tune out hidden states for pad tokens, see https://github.com/state-spaces/mamba/issues/66
hidden_states = (hidden_states * attention_mask[:, :, None]).to(dtype)
else:
ssm_state = torch.zeros(
(batch_size, self.num_heads, self.head_dim, self.ssm_state_size),
device=hidden_states.device, dtype=dtype
)
hidden_states = self.act(self.conv1d(hidden_states.transpose(1, 2))[..., :seq_len].transpose(1, 2))
hidden_states, B, C = torch.split(hidden_states, [self.intermediate_size, self.n_groups * self.ssm_state_size, self.n_groups * self.ssm_state_size], dim=-1)
A = -torch.exp(self.A_log.float()) # [num_heads]
if cache_params is not None and cache_params.has_previous_state:
# Note: there is no need to pad parameter matrices here, as there is just one new token
# for batched generation
dt = dt[:, None, ...] if dt.ndim == 2 else dt[:, 0, :][:, None, ...]
dt = dt.transpose(1, 2).expand(batch_size, dt.shape[-1], self.head_dim)
# [num_heads] -> [num_heads, head_dim]
dt_bias = self.dt_bias[..., None].expand(self.dt_bias.shape[0], self.head_dim)
dt = torch.nn.functional.softplus(dt + dt_bias.to(dt.dtype))
dt = torch.clamp(dt, self.time_step_min) #, self.time_step_max)
A = A[..., None, None].expand(self.num_heads, self.head_dim, self.ssm_state_size).to(dtype=torch.float32)
# [bsz, num_heads, head_dim, state_size]
dA = torch.exp(dt[..., None] * A)
# Discretize B
# [bsz, n_groups * state_size] -> [bsz, n_groups, 1, state_size] ->
# -> [bsz, n_groups, group to head repetition factor, state_size] -> [bsz, num_heads, state_size]
B = B.reshape(batch_size, self.n_groups, -1)[..., None, :]
B = B.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, B.shape[-1]).contiguous()
B = B.reshape(batch_size, -1, B.shape[-1])
# [bsz, num_heads, head_dim, state_size]
dB = dt[..., None] * B[..., None, :]
# Discretize x into dB
# [bsz, intermediate_size] -> [bsz, num_heads, head_dim]
hidden_states = hidden_states.reshape(batch_size, -1, self.head_dim)
dBx = dB * hidden_states[..., None]
# State calculation
cache_params.ssm_states[self.layer_idx].copy_(
cache_params.ssm_states[self.layer_idx] * dA + dBx
)
# Subsequent output
# [bsz, n_groups * state_size] -> [bsz, num_heads, state_size]
C = C.reshape(batch_size, self.n_groups, -1)[..., None, :]
C = C.expand(batch_size, self.n_groups, self.num_heads // self.n_groups, C.shape[-1]).contiguous()
C = C.reshape(batch_size, -1, C.shape[-1])
# [bsz, num_heads, head_dim]
ssm_states = cache_params.ssm_states[self.layer_idx].to(C.dtype) # Shape: [b, h, d, n]
# Reshape ssm_states to merge the first two dimensions
ssm_states_reshaped = ssm_states.view(batch_size * self.num_heads, self.head_dim, self.ssm_state_size) # Shape: [b*h, d, n]
C_reshaped = C.view(batch_size * self.num_heads, self.ssm_state_size, 1) # Shape: [b*h, n, 1]
y = torch.bmm(ssm_states_reshaped, C_reshaped)
y = y.view(batch_size, self.num_heads, self.head_dim)
# D skip connection
# [num_heads] -> [num_heads, head_dim]
D = self.D[..., None].expand(self.D.shape[0], self.head_dim)
y = (y + hidden_states * D).to(y.dtype)
# [bsz, num_heads, head_dim] -> [bsz, 1, intermediate_size]
y = y.reshape(batch_size, -1)[:, None, ...]
else:
# begin ssd naive implementation without einsums
dt = nn.functional.softplus(dt + self.dt_bias)
dt = torch.clamp(dt, self.time_step_min)
hidden_states = hidden_states.reshape(batch_size, seq_len, -1, self.head_dim).float()
B = B.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
C = C.reshape(batch_size, seq_len, -1, self.ssm_state_size).float()
B = B.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
C = C.repeat_interleave(self.num_heads // self.n_groups, dim=2, output_size=self.num_heads)
pad_size = (self.chunk_size - seq_len % self.chunk_size) % self.chunk_size
D_residual = self.D[..., None] * pad_tensor_by_size(hidden_states, pad_size)
# Discretize x and A
hidden_states = hidden_states * dt[..., None]
A = A.to(hidden_states.dtype) * dt
# Rearrange into blocks/chunks
hidden_states, A, B, C = [reshape_into_chunks(t, pad_size, self.chunk_size) for t in (hidden_states, A, B, C)]
# [bsz, -1, chunk_size, num_heads] -> [bsz, num_heads, -1, chunk_size]
A = A.permute(0, 3, 1, 2)
A_cumsum = torch.cumsum(A, dim=-1)
# 1. Compute the output for each intra-chunk (diagonal blocks)
# This is the analog of a causal mask
L = torch.exp(segment_sum(A))
# First, contraction of C and B to get G (attention-weights like)
G_intermediate = C[:, :, :, None, :, :] * B[:, :, None, :, : ,:] # shape: (b, c, l, s, h, n)
G = G_intermediate.sum(dim=-1) # shape: (b, c, l, s, h)
# Step 2: Compute M, equivalent to applying attention mask to weights
M_intermediate = G[..., None] * L.permute(0, 2, 3, 4, 1)[..., None]
M = M_intermediate.sum(dim=-1)
# Step 3: Compute Y_diag (apply to values)
Y_diag = (M[..., None] * hidden_states[:, :, None]).sum(3)
# (right term of low-rank factorization of off-diagonal blocks; B terms)
decay_states = torch.exp(A_cumsum[:, :, :, -1:] - A_cumsum)
B_decay_contraction = B * decay_states.permute(0, 2, 3, 1)[..., None]
# permute back B * decay states
states = (B_decay_contraction.permute(0, 1, 3, 2, 4)[..., None] * hidden_states.permute(0, 1, 3, 2, 4)[..., None, :]).sum(dim=3).permute(0, 1, 2, 4, 3)
if cache_params is not None and cache_params.has_previous_state:
previous_states = cache_params.ssm_states[self.layer_idx][:, None, ...]
else:
previous_states = torch.zeros_like(states[:, :1])
states = torch.cat([previous_states, states], dim=1)
decay_chunk = torch.exp(segment_sum(nn.functional.pad(A_cumsum[:, :, :, -1], (1, 0))))
states_permuted = states.permute(0, 2, 1, 3, 4)
result = (decay_chunk[..., None, None] * states_permuted[:, :, None, ...]).sum(dim=2)
new_states = result.permute(0, 2, 1, 3, 4)
states, ssm_state = new_states[:, :-1], new_states[:, -1]
# Compute state -> output conversion per chunk
# (left term of low-rank factorization of off-diagonal blocks; C terms)
state_decay_out = torch.exp(A_cumsum)
# compute Yoff
C_times_states = (C[..., None, :] * states[:, :, None, ...])
state_decay_out_permuted = state_decay_out.permute(0, 2, 3, 1)
Y_off = (C_times_states.sum(-1) * state_decay_out_permuted[..., None])
# Add output of intra-chunk and inter-chunk terms (diagonal and off-diagonal blocks)
y = Y_diag + Y_off
# [bsz, -1, self.chunk_size, num_heads, head_dim] -> [bsz, (padded) seq_len, num_heads, head_dim]
y = y.reshape(batch_size, -1, self.num_heads, self.head_dim)
y = y + D_residual
# Cutting off padded chunks
if pad_size > 0:
y = y[:, :seq_len, :, :]
y = y.reshape(batch_size, seq_len, -1)
if ssm_state is not None and cache_params is not None:
cache_params.ssm_states[self.layer_idx].copy_(ssm_state)
scan_output = self.norm(y, gate)
# end ssd naive
# 4. Final linear projection
contextualized_states = self.out_proj(scan_output.to(dtype)) # [batch, seq_len, hidden_size]
return contextualized_states
# fmt: on
def forward(
self,
hidden_states,
cache_params: Optional[Zamba2HybridDynamicCache] = None,
attention_mask: Optional[torch.Tensor] = None,
):
if is_fast_path_available and "cuda" in self.in_proj.weight.device.type:
return self.cuda_kernels_forward(hidden_states, cache_params, attention_mask)
return self.torch_forward(hidden_states, cache_params, attention_mask)
class Zamba2MLP(nn.Module):
def __init__(self, config: Zamba2Config, num_fwd_mem_blocks=None, block_id: Optional[int] = None):
"""
        This MLP layer contributes to tied transformer blocks aimed at increasing compute without increasing model size. Because this layer
        is tied, un-tied adapter modules (formally the same as LoRA, but used in the base model) are added to the up and gate projectors to increase expressivity with a small memory overhead.
"""
super().__init__()
self.config = config
self.hidden_size = config.hidden_size
self.intermediate_size = config.intermediate_size
self.num_fwd_mem_blocks = num_fwd_mem_blocks
self.block_id = block_id
self.gate_up_proj = nn.Linear(self.hidden_size, 2 * self.intermediate_size, bias=config.add_bias_linear)
self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.add_bias_linear)
self.act_fn = ACT2FN[config.hidden_act]
self.gate_up_proj_adapter_list = nn.ModuleList([])
for i in range(self.num_fwd_mem_blocks):
if i % config.num_mem_blocks == block_id:
gate_up_proj_adapter = nn.Sequential(
nn.Linear(self.config.hidden_size, self.config.adapter_rank, bias=False),
nn.Linear(self.config.adapter_rank, 2 * self.intermediate_size, bias=False),
)
else:
gate_up_proj_adapter = nn.Identity()
self.gate_up_proj_adapter_list.append(gate_up_proj_adapter)
layer_block_map = config.hybrid_layer_ids
self.layer_dic = {value: index for index, value in enumerate(layer_block_map)}
def forward(self, hidden_state, layer_idx=None):
gate_up_state = self.gate_up_proj(hidden_state)
layer_idx = self.layer_dic[layer_idx]
gate_up_state = gate_up_state + self.gate_up_proj_adapter_list[layer_idx](hidden_state)
gate_up_state = torch.chunk(gate_up_state, 2, dim=-1)
hidden_state = self.act_fn(gate_up_state[0]) * gate_up_state[1]
output = self.down_proj(hidden_state)
return output
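
# Illustrative sketch of the adapter construction described in the docstring above, with
# hypothetical sizes: two stacked bias-free linear layers implement a LoRA-style update whose
# composed weight matrix has rank at most `rank`.
def _low_rank_adapter_example():
    hidden, rank, out = 16, 2, 32
    adapter = nn.Sequential(
        nn.Linear(hidden, rank, bias=False),
        nn.Linear(rank, out, bias=False),
    )
    composed = adapter[1].weight @ adapter[0].weight  # (out, hidden), rank <= 2
    assert torch.linalg.matrix_rank(composed) <= rank
    x = torch.randn(3, hidden)
    # applying the Sequential is the same as multiplying by the composed low-rank matrix
    assert torch.allclose(adapter(x), x @ composed.T, atol=1e-5)
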
class Zamba2AttentionDecoderLayer(nn.Module):
def __init__(self, config: Zamba2Config, block_id: Optional[int] = None, layer_idx: Optional[int] = None):
super().__init__()
self.block_id = block_id
num_gs = len(config.hybrid_layer_ids)
self.self_attn = Zamba2Attention(config, layer_idx=-1, num_fwd_mem_blocks=num_gs, block_id=block_id)
self.feed_forward = Zamba2MLP(config, num_fwd_mem_blocks=num_gs, block_id=block_id)
self.input_layernorm = Zamba2RMSNorm(config.attention_hidden_size, eps=config.rms_norm_eps)
self.pre_ff_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
original_hidden_states: torch.Tensor,
layer_idx: int,
attention_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
position_embeddings: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): output of previous Mamba layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output of shape `(batch, seq_len, embed_dim)`.
This is concatenated with `hidden_states` (which is the output of the previous (mamba) layer). The
concatenated tensor is then used as input of the pre-attention RMSNorm
(see fig. 2 in https://huggingface.co/papers/2405.16712).
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
hidden_states = torch.concatenate([hidden_states, original_hidden_states], dim=-1)
hidden_states = self.input_layernorm(hidden_states)
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
layer_idx=layer_idx,
attention_mask=attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = self.pre_ff_layernorm(hidden_states)
hidden_states = self.feed_forward(hidden_states, layer_idx)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
return outputs
class Zamba2MambaDecoderLayer(nn.Module):
def __init__(self, config: Zamba2Config, layer_idx: int):
super().__init__()
self.mamba = Zamba2MambaMixer(config=config, layer_idx=layer_idx)
self.input_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.layer_idx = layer_idx
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
original_hidden_states: Optional[torch.Tensor] = None,
layer_idx: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
causal_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
transformer_hidden_states: Optional[torch.Tensor] = None,
**kwargs,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
Indices depicting the position of the input sequence tokens in the sequence.
"""
residual = hidden_states
# `transformer_hidden_states` is the output from shared transformer + linear layer (see fig. 2 in https://huggingface.co/papers/2405.16712).
# `transformer_hidden_states` is then added to the input to the mamba layer below (as described in eq. (6) of https://huggingface.co/papers/2405.16712).
hidden_states = (
hidden_states + transformer_hidden_states if transformer_hidden_states is not None else hidden_states
)
hidden_states = self.input_layernorm(hidden_states)
hidden_states = self.mamba(
hidden_states=hidden_states,
cache_params=past_key_values,
attention_mask=attention_mask,
)
self_attn_weights = None
# residual connection after mamba
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights,)
if use_cache:
outputs += (past_key_values,)
return outputs
class Zamba2HybridLayer(nn.Module):
def __init__(
self, shared_transformer: Zamba2AttentionDecoderLayer, linear: nn.Linear, mamba: Zamba2MambaDecoderLayer
):
super().__init__()
self.linear = linear
self.mamba_decoder = mamba
self.shared_transformer = shared_transformer
@deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
def forward(
self,
hidden_states: torch.Tensor,
original_hidden_states: Optional[torch.Tensor] = None,
layer_idx: Optional[int] = None,
attention_mask: Optional[torch.Tensor] = None,
causal_mask: Optional[torch.Tensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = False,
position_embeddings: Optional[torch.LongTensor] = None,
) -> tuple[torch.FloatTensor, Optional[tuple[torch.FloatTensor, torch.FloatTensor]]]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
original_hidden_states (`torch.FloatTensor`): word embedding output that will be concatenated with
hidden activations to form the input of the shared transformer layer.
layer_idx (`int`): layer number.
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
`(batch, sequence_length)` where padding elements are indicated by 0.
past_key_values (`Zamba2HybridDynamicCache`, *optional*): cached past key and value projection states
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
position_embeddings (`tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
with `head_dim` being the embedding dimension of each attention head.
"""
layer_outputs = self.shared_transformer(
hidden_states,
original_hidden_states=original_hidden_states,
layer_idx=layer_idx,
attention_mask=causal_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
position_embeddings=position_embeddings,
)
transformer_hidden_states = layer_outputs[0]
if output_attentions:
self_attn_weights = layer_outputs[1]
transformer_hidden_states = self.linear(transformer_hidden_states)
layer_outputs = self.mamba_decoder(
hidden_states,
transformer_hidden_states=transformer_hidden_states,
attention_mask=attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
position_embeddings=position_embeddings,
)
if output_attentions:
layer_outputs = (layer_outputs[0], self_attn_weights) + layer_outputs[2:]
return layer_outputs
class Zamba2PreTrainedModel(PreTrainedModel):
config: Zamba2Config
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Zamba2AttentionDecoderLayer", "Zamba2MambaDecoderLayer"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_flex_attn = True
_supports_sdpa = True
# Note: only supports Zamba2HybridDynamicCache
_is_stateful = True
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Zamba2MambaMixer):
dt = torch.exp(
torch.rand(self.config.n_mamba_heads)
* (math.log(self.config.time_step_max) - math.log(self.config.time_step_min))
+ math.log(self.config.time_step_min)
).clamp(min=self.config.time_step_floor)
            # Inverse of softplus: https://github.com/pytorch/pytorch/issues/72759
inv_dt = dt + torch.log(-torch.expm1(-dt))
module.dt_bias.data.copy_(inv_dt)
A = torch.arange(1, module.num_heads + 1)
module.A_log.data.copy_(torch.log(A))
module.D.data.fill_(1.0)
@auto_docstring
class Zamba2Model(Zamba2PreTrainedModel):
"""
Model consisting of *config.num_hidden_layers* layers.
Args:
config: Zamba2Config
"""
def __init__(self, config: Zamba2Config):
super().__init__(config)
self.config = config
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
blocks = [Zamba2AttentionDecoderLayer(config, block_id=k) for k in range(config.num_mem_blocks)]
mamba_layers = []
linear_layers = []
self.layers_block_type = config.layers_block_type
for i in range(config.num_hidden_layers):
if config.layers_block_type[i] == "mamba":
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
elif config.layers_block_type[i] == "hybrid":
linear_layers.append(nn.Linear(self.config.hidden_size, self.config.hidden_size, bias=False))
mamba_layers.append(Zamba2MambaDecoderLayer(config, layer_idx=i))
mamba_layers = iter(mamba_layers)
linear_layers = iter(linear_layers)
blocks = cycle(blocks)
layers = self.get_layers(blocks, linear_layers, mamba_layers)
self.layers = nn.ModuleList(layers)
self._attn_implementation = config._attn_implementation
self.final_layernorm = Zamba2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
if config.use_mem_rope:
if config.use_long_context:
logger.warning_once(
"`use_long_context` set to `True`: using rescaled `rope_theta` and extended `max_position_embeddings`."
)
self.rotary_emb = Zamba2RotaryEmbedding(config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
) -> Union[tuple, BaseModelOutputWithPast]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
)
if self.gradient_checkpointing and self.training and use_cache:
logger.warning_once(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
)
use_cache = False
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
hidden_states = inputs_embeds
original_hidden_states = torch.clone(inputs_embeds)
# original_hidden_states: word embedding output that will be concatenated with hidden activations to form the input of the shared transformer layer
if use_cache and past_key_values is None:
batch_size = input_ids.shape[0] if input_ids is not None else inputs_embeds.shape[0]
past_key_values = Zamba2HybridDynamicCache(self.config, batch_size, dtype=self.dtype, device=self.device)
if cache_position is None:
past_seen_tokens = (
past_key_values.get_seq_length(layer_idx=self.first_transformer_layer_id)
if past_key_values is not None
else 0
)
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position)
# create position embeddings to be shared across the decoder layers
if self.config.use_mem_rope:
position_embeddings = self.rotary_emb(hidden_states, position_ids)
else:
position_embeddings = None
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
for layer_idx, layer in enumerate(self.layers):
if output_hidden_states:
all_hidden_states += (hidden_states,)
if self.gradient_checkpointing and self.training:
layer_outputs = self._gradient_checkpointing_func(
layer.__call__,
hidden_states,
original_hidden_states,
layer_idx,
attention_mask,
causal_mask,
past_key_values,
output_attentions,
use_cache,
position_embeddings,
)
else:
layer_outputs = layer(
hidden_states,
original_hidden_states=original_hidden_states,
layer_idx=layer_idx,
attention_mask=attention_mask,
causal_mask=causal_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
use_cache=use_cache,
position_embeddings=position_embeddings,
)
hidden_states = layer_outputs[0]
if output_attentions:
if layer_outputs[1] is not None:
# append attentions only of attention layers. Mamba layers return `None` as the attention weights
all_self_attns += (layer_outputs[1],)
hidden_states = self.final_layernorm(hidden_states)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
if past_key_values is not None and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
output = BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
hidden_states=all_hidden_states,
attentions=all_self_attns,
)
return output if return_dict else output.to_tuple()
def _update_causal_mask(self, attention_mask, input_tensor, cache_position):
if self.config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
dtype, device = input_tensor.dtype, input_tensor.device
min_dtype = torch.finfo(dtype).min
sequence_length = input_tensor.shape[1]
target_length = cache_position[-1] + 1
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
if sequence_length != 1:
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
if attention_mask is not None:
causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
if attention_mask.dim() == 2:
mask_length = attention_mask.shape[-1]
padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0)
causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype)
if (
self.config._attn_implementation == "sdpa"
and attention_mask is not None
and attention_mask.device.type in ["cuda", "xpu", "npu"]
):
# Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
# using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
# Details: https://github.com/pytorch/pytorch/issues/110213
causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
return causal_mask
def get_layers(self, blocks, linear_layers, mamba_layers):
layers = []
self._tied_weights_keys = []
self.first_transformer_layer_id = 0
for layer_id, layer_type in enumerate(self.layers_block_type):
if layer_type == "hybrid":
if self.first_transformer_layer_id == 0:
self.first_transformer_layer_id = layer_id
block = next(blocks)
if self.config.num_mem_blocks * len(self.config.hybrid_layer_ids) > 1:
prefix_pattern = rf"^layers\.{layer_id}\.shared_transformer\."
main_keys_pattern = re.compile(
prefix_pattern
+ r"(?:"
+ r"self_attn\.(?:q_proj|k_proj|v_proj|o_proj)\.weight|"
+ r"feed_forward\.(?:gate_up_proj|down_proj)\.weight|"
+ r"(?:input_layernorm|pre_ff_layernorm)\.weight"
+ r")$"
)
self._tied_weights_keys.append(main_keys_pattern)
adapter_id = 0
for _layer_type in self.layers_block_type:
if _layer_type == "hybrid" and adapter_id % self.config.num_mem_blocks == block.block_id:
adapter_pattern = re.compile(
r"^shared_transformer\.feed_forward\.gate_up_proj_adapter_list\."
+ str(adapter_id)
+ r"\.(?:0|1)\.weight$"
)
self._tied_weights_keys.append(adapter_pattern)
adapter_id += 1
if self.config.use_shared_attention_adapter:
adapter_id = 0
for _layer_type in self.layers_block_type:
if _layer_type == "hybrid" and adapter_id % self.config.num_mem_blocks == block.block_id:
attn_adapter_pattern = re.compile(
r"^shared_transformer\.self_attn\."
+ r"(?:linear_q_adapter_list|linear_k_adapter_list|linear_v_adapter_list)\."
+ str(adapter_id)
+ r"\.(?:0|1)\.weight$"
)
self._tied_weights_keys.append(attn_adapter_pattern)
adapter_id += 1
layers.append(Zamba2HybridLayer(block, next(linear_layers), next(mamba_layers)))
else:
layers.append(next(mamba_layers))
return layers
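
# Illustrative sketch of the block-sharing scheme used by __init__ and get_layers above, with
# made-up counts: `cycle` hands the same few shared transformer blocks out round-robin, so with
# 2 memory blocks and 5 hybrid layers the same module objects are reused, which is why the
# tied-weights patterns are registered per hybrid layer.
def _shared_block_cycling_example():
    from itertools import cycle as _cycle

    shared_blocks = ["block_0", "block_1"]  # stand-ins for shared Zamba2AttentionDecoderLayer modules
    block_iter = _cycle(shared_blocks)
    assignment = [next(block_iter) for _ in range(5)]
    assert assignment == ["block_0", "block_1", "block_0", "block_1", "block_0"]
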
# Adapted from transformers.models.jamba.modeling_jamba.JambaForCausalLM with Jamba->Zamba2, JAMBA->ZAMBA2
class Zamba2ForCausalLM(Zamba2PreTrainedModel, GenerationMixin):
def __init__(self, config: Zamba2Config):
super().__init__(config)
self.model = Zamba2Model(config)
self._tied_weights_keys = ["lm_head.weight", *self.model._tied_weights_keys]
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
def set_decoder(self, decoder):
self.model = decoder
def get_decoder(self):
return self.model
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Zamba2HybridDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from transformers import AutoTokenizer, Zamba2ForCausalLM
>>> model = Zamba2ForCausalLM.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B-v1")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
return_dict=return_dict,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
**kwargs,
):
# Overwritten -- has a unique cache type, `Zamba2HybridDynamicCache`
empty_past_kv = past_key_values is None
# Omit tokens covered by past_key_values
if not empty_past_kv:
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
# Exception 1: when passing input_embeds, input_ids may be missing entries
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
# (we can't check exception 3 while compiling)
if (
inputs_embeds is not None # Exception 1
or cache_position[-1] >= input_ids.shape[1] # Exception 3
):
input_ids = input_ids[:, -cache_position.shape[0] :]
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
input_ids = input_ids[:, cache_position]
else:
past_key_values = Zamba2HybridDynamicCache(
self.config, input_ids.shape[0], dtype=self.dtype, device=self.device
)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1] :]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and empty_past_kv:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": use_cache,
"attention_mask": attention_mask,
"logits_to_keep": self.config.num_logits_to_keep,
"cache_position": cache_position,
}
)
return model_inputs
@auto_docstring(
custom_intro="""
The Zamba2 Model with a sequence classification head on top (linear layer).
[`Zamba2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT-2) do.
    Since it does classification on the last token, it needs to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
"""
)
class Zamba2ForSequenceClassification(Zamba2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.model = Zamba2Model(config)
self._tied_weights_keys = self.model._tied_weights_keys
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Union[Cache, list[torch.FloatTensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, SequenceClassifierOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
transformer_outputs = self.model(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
if input_ids is not None:
batch_size = input_ids.shape[0]
else:
batch_size = inputs_embeds.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
labels = labels.to(logits.device)
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(pooled_logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(pooled_logits, labels)
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
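
# Illustrative sketch, with a hypothetical pad_token_id, of the pooling trick used in the
# forward pass above: multiplying token indices by the non-pad mask and taking argmax picks
# the rightmost non-padding position in each row, for both left- and right-padded inputs.
def _last_non_pad_token_example():
    pad_token_id = 0
    input_ids = torch.tensor([[0, 5, 6, 0], [7, 8, 0, 0]])
    non_pad_mask = (input_ids != pad_token_id).to(torch.int32)
    token_indices = torch.arange(input_ids.shape[-1], dtype=torch.int32)
    last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
    assert last_non_pad_token.tolist() == [2, 1]
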
__all__ = ["Zamba2ForCausalLM", "Zamba2ForSequenceClassification", "Zamba2Model", "Zamba2PreTrainedModel"]
| transformers/src/transformers/models/zamba2/modeling_zamba2.py/0 | {
"file_path": "transformers/src/transformers/models/zamba2/modeling_zamba2.py",
"repo_id": "transformers",
"token_count": 39025
} | 522 |
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Optional, Union
from huggingface_hub import model_info
from ..configuration_utils import PretrainedConfig
from ..dynamic_module_utils import get_class_from_dynamic_module
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..image_processing_utils import BaseImageProcessor
from ..models.auto.configuration_auto import AutoConfig
from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
from ..models.auto.modeling_auto import AutoModelForDepthEstimation, AutoModelForImageToImage
from ..models.auto.processing_auto import PROCESSOR_MAPPING, AutoProcessor
from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
from ..processing_utils import ProcessorMixin
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import (
CONFIG_NAME,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
cached_file,
extract_commit_hash,
find_adapter_config_file,
is_kenlm_available,
is_offline_mode,
is_peft_available,
is_pyctcdecode_available,
is_tf_available,
is_torch_available,
logging,
)
from .audio_classification import AudioClassificationPipeline
from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline
from .base import (
ArgumentHandler,
CsvPipelineDataFormat,
JsonPipelineDataFormat,
PipedPipelineDataFormat,
Pipeline,
PipelineDataFormat,
PipelineException,
PipelineRegistry,
get_default_model_and_revision,
infer_framework_load_model,
)
from .depth_estimation import DepthEstimationPipeline
from .document_question_answering import DocumentQuestionAnsweringPipeline
from .feature_extraction import FeatureExtractionPipeline
from .fill_mask import FillMaskPipeline
from .image_classification import ImageClassificationPipeline
from .image_feature_extraction import ImageFeatureExtractionPipeline
from .image_segmentation import ImageSegmentationPipeline
from .image_text_to_text import ImageTextToTextPipeline
from .image_to_image import ImageToImagePipeline
from .image_to_text import ImageToTextPipeline
from .mask_generation import MaskGenerationPipeline
from .object_detection import ObjectDetectionPipeline
from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline
from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline
from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline
from .text_classification import TextClassificationPipeline
from .text_generation import TextGenerationPipeline
from .text_to_audio import TextToAudioPipeline
from .token_classification import (
AggregationStrategy,
NerPipeline,
TokenClassificationArgumentHandler,
TokenClassificationPipeline,
)
from .video_classification import VideoClassificationPipeline
from .visual_question_answering import VisualQuestionAnsweringPipeline
from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline
from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline
from .zero_shot_image_classification import ZeroShotImageClassificationPipeline
from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForImageClassification,
TFAutoModelForMaskedLM,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelForVision2Seq,
TFAutoModelForZeroShotImageClassification,
)
if is_torch_available():
import torch
from ..models.auto.modeling_auto import (
AutoModel,
AutoModelForAudioClassification,
AutoModelForCausalLM,
AutoModelForCTC,
AutoModelForDocumentQuestionAnswering,
AutoModelForImageClassification,
AutoModelForImageSegmentation,
AutoModelForImageTextToText,
AutoModelForMaskedLM,
AutoModelForMaskGeneration,
AutoModelForObjectDetection,
AutoModelForQuestionAnswering,
AutoModelForSemanticSegmentation,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForSpeechSeq2Seq,
AutoModelForTableQuestionAnswering,
AutoModelForTextToSpectrogram,
AutoModelForTextToWaveform,
AutoModelForTokenClassification,
AutoModelForVideoClassification,
AutoModelForVision2Seq,
AutoModelForVisualQuestionAnswering,
AutoModelForZeroShotImageClassification,
AutoModelForZeroShotObjectDetection,
)
if TYPE_CHECKING:
from ..modeling_tf_utils import TFPreTrainedModel
from ..modeling_utils import PreTrainedModel
from ..tokenization_utils_fast import PreTrainedTokenizerFast
logger = logging.get_logger(__name__)
# Register all the supported tasks here
TASK_ALIASES = {
"sentiment-analysis": "text-classification",
"ner": "token-classification",
"vqa": "visual-question-answering",
"text-to-speech": "text-to-audio",
}
SUPPORTED_TASKS = {
"audio-classification": {
"impl": AudioClassificationPipeline,
"tf": (),
"pt": (AutoModelForAudioClassification,) if is_torch_available() else (),
"default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}},
"type": "audio",
},
"automatic-speech-recognition": {
"impl": AutomaticSpeechRecognitionPipeline,
"tf": (),
"pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "22aad52")}},
"type": "multimodal",
},
"text-to-audio": {
"impl": TextToAudioPipeline,
"tf": (),
"pt": (AutoModelForTextToWaveform, AutoModelForTextToSpectrogram) if is_torch_available() else (),
"default": {"model": {"pt": ("suno/bark-small", "1dbd7a1")}},
"type": "text",
},
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": (TFAutoModel,) if is_tf_available() else (),
"pt": (AutoModel,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("distilbert/distilbert-base-cased", "6ea8117"),
"tf": ("distilbert/distilbert-base-cased", "6ea8117"),
}
},
"type": "multimodal",
},
"text-classification": {
"impl": TextClassificationPipeline,
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "714eb0f"),
"tf": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "714eb0f"),
},
},
"type": "text",
},
"token-classification": {
"impl": TokenClassificationPipeline,
"tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (),
"pt": (AutoModelForTokenClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "4c53496"),
"tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "4c53496"),
},
},
"type": "text",
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (),
"pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("distilbert/distilbert-base-cased-distilled-squad", "564e9b5"),
"tf": ("distilbert/distilbert-base-cased-distilled-squad", "564e9b5"),
},
},
"type": "text",
},
"table-question-answering": {
"impl": TableQuestionAnsweringPipeline,
"pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (),
"tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (),
"default": {
"model": {
"pt": ("google/tapas-base-finetuned-wtq", "e3dde19"),
"tf": ("google/tapas-base-finetuned-wtq", "e3dde19"),
},
},
"type": "text",
},
"visual-question-answering": {
"impl": VisualQuestionAnsweringPipeline,
"pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (),
"tf": (),
"default": {
"model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "d0a1f6a")},
},
"type": "multimodal",
},
"document-question-answering": {
"impl": DocumentQuestionAnsweringPipeline,
"pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (),
"tf": (),
"default": {
"model": {"pt": ("impira/layoutlm-document-qa", "beed3c4")},
},
"type": "multimodal",
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (),
"pt": (AutoModelForMaskedLM,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("distilbert/distilroberta-base", "fb53ab8"),
"tf": ("distilbert/distilroberta-base", "fb53ab8"),
}
},
"type": "text",
},
"summarization": {
"impl": SummarizationPipeline,
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
"default": {
"model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("google-t5/t5-small", "df1b051")}
},
"type": "text",
},
# This task is a special case as it's parametrized by SRC, TGT languages.
"translation": {
"impl": TranslationPipeline,
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
"default": {
("en", "fr"): {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
("en", "de"): {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
("en", "ro"): {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
},
"type": "text",
},
"text2text-generation": {
"impl": Text2TextGenerationPipeline,
"tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
"pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
"default": {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
"type": "text",
},
"text-generation": {
"impl": TextGenerationPipeline,
"tf": (TFAutoModelForCausalLM,) if is_tf_available() else (),
"pt": (AutoModelForCausalLM,) if is_torch_available() else (),
"default": {"model": {"pt": ("openai-community/gpt2", "607a30d"), "tf": ("openai-community/gpt2", "607a30d")}},
"type": "text",
},
"zero-shot-classification": {
"impl": ZeroShotClassificationPipeline,
"tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
"pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("facebook/bart-large-mnli", "d7645e1"),
"tf": ("FacebookAI/roberta-large-mnli", "2a8f12d"),
},
"config": {
"pt": ("facebook/bart-large-mnli", "d7645e1"),
"tf": ("FacebookAI/roberta-large-mnli", "2a8f12d"),
},
},
"type": "text",
},
"zero-shot-image-classification": {
"impl": ZeroShotImageClassificationPipeline,
"tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (),
"pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("openai/clip-vit-base-patch32", "3d74acf"),
"tf": ("openai/clip-vit-base-patch32", "3d74acf"),
}
},
"type": "multimodal",
},
"zero-shot-audio-classification": {
"impl": ZeroShotAudioClassificationPipeline,
"tf": (),
"pt": (AutoModel,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("laion/clap-htsat-fused", "cca9e28"),
}
},
"type": "multimodal",
},
"image-classification": {
"impl": ImageClassificationPipeline,
"tf": (TFAutoModelForImageClassification,) if is_tf_available() else (),
"pt": (AutoModelForImageClassification,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("google/vit-base-patch16-224", "3f49326"),
"tf": ("google/vit-base-patch16-224", "3f49326"),
}
},
"type": "image",
},
"image-feature-extraction": {
"impl": ImageFeatureExtractionPipeline,
"tf": (TFAutoModel,) if is_tf_available() else (),
"pt": (AutoModel,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("google/vit-base-patch16-224", "3f49326"),
"tf": ("google/vit-base-patch16-224", "3f49326"),
}
},
"type": "image",
},
"image-segmentation": {
"impl": ImageSegmentationPipeline,
"tf": (),
"pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "d53b52a")}},
"type": "multimodal",
},
"image-to-text": {
"impl": ImageToTextPipeline,
"tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (),
"pt": (AutoModelForVision2Seq,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("ydshieh/vit-gpt2-coco-en", "5bebf1e"),
"tf": ("ydshieh/vit-gpt2-coco-en", "5bebf1e"),
}
},
"type": "multimodal",
},
"image-text-to-text": {
"impl": ImageTextToTextPipeline,
"tf": (),
"pt": (AutoModelForImageTextToText,) if is_torch_available() else (),
"default": {
"model": {
"pt": ("llava-hf/llava-onevision-qwen2-0.5b-ov-hf", "2c9ba3b"),
}
},
"type": "multimodal",
},
"object-detection": {
"impl": ObjectDetectionPipeline,
"tf": (),
"pt": (AutoModelForObjectDetection,) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/detr-resnet-50", "1d5f47b")}},
"type": "multimodal",
},
"zero-shot-object-detection": {
"impl": ZeroShotObjectDetectionPipeline,
"tf": (),
"pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (),
"default": {"model": {"pt": ("google/owlvit-base-patch32", "cbc355f")}},
"type": "multimodal",
},
"depth-estimation": {
"impl": DepthEstimationPipeline,
"tf": (),
"pt": (AutoModelForDepthEstimation,) if is_torch_available() else (),
"default": {"model": {"pt": ("Intel/dpt-large", "bc15f29")}},
"type": "image",
},
"video-classification": {
"impl": VideoClassificationPipeline,
"tf": (),
"pt": (AutoModelForVideoClassification,) if is_torch_available() else (),
"default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "488eb9a")}},
"type": "video",
},
"mask-generation": {
"impl": MaskGenerationPipeline,
"tf": (),
"pt": (AutoModelForMaskGeneration,) if is_torch_available() else (),
"default": {"model": {"pt": ("facebook/sam-vit-huge", "87aecf0")}},
"type": "multimodal",
},
"image-to-image": {
"impl": ImageToImagePipeline,
"tf": (),
"pt": (AutoModelForImageToImage,) if is_torch_available() else (),
"default": {"model": {"pt": ("caidas/swin2SR-classical-sr-x2-64", "cee1c92")}},
"type": "image",
},
}
PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES)
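# A hedged sketch of extending the registry above with a custom task; "pair-scoring" and
# PairScoringPipeline are hypothetical placeholders, while `register_pipeline` is the documented
# mechanism transformers exposes for custom pipelines.
class PairScoringPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}
    def preprocess(self, inputs):
        return self.tokenizer(inputs, return_tensors=self.framework)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs):
        return model_outputs.logits.softmax(-1).tolist()
def _example_register_pair_scoring():
    PIPELINE_REGISTRY.register_pipeline(
        "pair-scoring",
        pipeline_class=PairScoringPipeline,
        pt_model=AutoModelForSequenceClassification if is_torch_available() else (),
        type="text",
    )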
def get_supported_tasks() -> list[str]:
"""
Returns a list of supported task strings.
"""
return PIPELINE_REGISTRY.get_supported_tasks()
def get_task(model: str, token: Optional[str] = None, **deprecated_kwargs) -> str:
use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
if is_offline_mode():
raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode")
try:
info = model_info(model, token=token)
except Exception as e:
raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}")
if not info.pipeline_tag:
raise RuntimeError(
f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically"
)
if getattr(info, "library_name", "transformers") not in {"transformers", "timm"}:
raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers")
task = info.pipeline_tag
return task
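# A small usage sketch for `get_task`; it queries the Hub, so it needs network access, and the call is
# wrapped so that offline mode or a missing pipeline_tag surfaces as a message rather than a crash.
def _example_get_task():
    try:
        return get_task("distilbert/distilbert-base-uncased-finetuned-sst-2-english")  # "text-classification"
    except RuntimeError as exc:
        return f"could not infer task: {exc}"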
def check_task(task: str) -> tuple[str, dict, Any]:
"""
Checks an incoming task string to validate that it's correct, and returns the default pipeline and model
classes, as well as the default models if they exist.
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`
- `"automatic-speech-recognition"`
- `"conversational"`
- `"depth-estimation"`
- `"document-question-answering"`
- `"feature-extraction"`
- `"fill-mask"`
- `"image-classification"`
- `"image-feature-extraction"`
- `"image-segmentation"`
- `"image-to-text"`
- `"image-to-image"`
- `"object-detection"`
- `"question-answering"`
- `"summarization"`
- `"table-question-answering"`
- `"text2text-generation"`
- `"text-classification"` (alias `"sentiment-analysis"` available)
- `"text-generation"`
- `"text-to-audio"` (alias `"text-to-speech"` available)
- `"token-classification"` (alias `"ner"` available)
- `"translation"`
- `"translation_xx_to_yy"`
- `"video-classification"`
- `"visual-question-answering"` (alias `"vqa"` available)
- `"zero-shot-classification"`
- `"zero-shot-image-classification"`
- `"zero-shot-object-detection"`
Returns:
(normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
(with aliases and options removed), the dictionary required to initialize the pipeline, and any extra task
options for parametrized tasks like `"translation_xx_to_yy"`.
"""
return PIPELINE_REGISTRY.check_task(task)
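# A short sketch of what `check_task` returns: aliases from TASK_ALIASES are resolved before lookup, and
# parametrized translation tasks additionally yield a (src, tgt) options tuple.
def _example_check_task():
    normalized, targeted_task, options = check_task("sentiment-analysis")
    assert normalized == "text-classification" and options is None
    normalized, targeted_task, options = check_task("translation_en_to_fr")
    assert normalized == "translation" and options == ("en", "fr")
    return targeted_task["impl"]  # TranslationPipeline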
def clean_custom_task(task_info):
import transformers
if "impl" not in task_info:
raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.")
pt_class_names = task_info.get("pt", ())
if isinstance(pt_class_names, str):
pt_class_names = [pt_class_names]
task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names)
tf_class_names = task_info.get("tf", ())
if isinstance(tf_class_names, str):
tf_class_names = [tf_class_names]
task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names)
return task_info, None
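# A hedged sketch of the dictionary shape `clean_custom_task` expects: a custom pipeline config from the
# Hub stores model classes as strings, and this helper resolves them to the actual transformers classes
# (resolving "AutoModelForSequenceClassification" below requires PyTorch to be installed).
def _example_clean_custom_task():
    task_info = {
        "impl": "SomeCustomPipeline",  # hypothetical name; the class itself is loaded elsewhere
        "pt": "AutoModelForSequenceClassification",
        "tf": (),
    }
    cleaned, _ = clean_custom_task(task_info)
    # cleaned["pt"] is now a tuple holding the AutoModelForSequenceClassification class, not a string
    return cleaned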
# <generated-code>
# fmt: off
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# The part of the file below was automatically generated from the code.
# Do NOT edit this part of the file manually as any edits will be overwritten by the generation
# of the file. If any change should be done, please apply the changes to the `pipeline` function
# below and run `python utils/check_pipeline_typing.py --fix_and_overwrite` to update the file.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
from typing import Literal, overload
@overload
def pipeline(task: Literal[None], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> Pipeline: ...
@overload
def pipeline(task: Literal["audio-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> AudioClassificationPipeline: ...
@overload
def pipeline(task: Literal["automatic-speech-recognition"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> AutomaticSpeechRecognitionPipeline: ...
@overload
def pipeline(task: Literal["depth-estimation"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> DepthEstimationPipeline: ...
@overload
def pipeline(task: Literal["document-question-answering"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> DocumentQuestionAnsweringPipeline: ...
@overload
def pipeline(task: Literal["feature-extraction"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> FeatureExtractionPipeline: ...
@overload
def pipeline(task: Literal["fill-mask"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> FillMaskPipeline: ...
@overload
def pipeline(task: Literal["image-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ImageClassificationPipeline: ...
@overload
def pipeline(task: Literal["image-feature-extraction"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ImageFeatureExtractionPipeline: ...
@overload
def pipeline(task: Literal["image-segmentation"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ImageSegmentationPipeline: ...
@overload
def pipeline(task: Literal["image-text-to-text"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ImageTextToTextPipeline: ...
@overload
def pipeline(task: Literal["image-to-image"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ImageToImagePipeline: ...
@overload
def pipeline(task: Literal["image-to-text"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ImageToTextPipeline: ...
@overload
def pipeline(task: Literal["mask-generation"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> MaskGenerationPipeline: ...
@overload
def pipeline(task: Literal["object-detection"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ObjectDetectionPipeline: ...
@overload
def pipeline(task: Literal["question-answering"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> QuestionAnsweringPipeline: ...
@overload
def pipeline(task: Literal["summarization"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> SummarizationPipeline: ...
@overload
def pipeline(task: Literal["table-question-answering"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> TableQuestionAnsweringPipeline: ...
@overload
def pipeline(task: Literal["text-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> TextClassificationPipeline: ...
@overload
def pipeline(task: Literal["text-generation"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> TextGenerationPipeline: ...
@overload
def pipeline(task: Literal["text-to-audio"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> TextToAudioPipeline: ...
@overload
def pipeline(task: Literal["text2text-generation"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> Text2TextGenerationPipeline: ...
@overload
def pipeline(task: Literal["token-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> TokenClassificationPipeline: ...
@overload
def pipeline(task: Literal["translation"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> TranslationPipeline: ...
@overload
def pipeline(task: Literal["video-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> VideoClassificationPipeline: ...
@overload
def pipeline(task: Literal["visual-question-answering"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> VisualQuestionAnsweringPipeline: ...
@overload
def pipeline(task: Literal["zero-shot-audio-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ZeroShotAudioClassificationPipeline: ...
@overload
def pipeline(task: Literal["zero-shot-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ZeroShotClassificationPipeline: ...
@overload
def pipeline(task: Literal["zero-shot-image-classification"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ZeroShotImageClassificationPipeline: ...
@overload
def pipeline(task: Literal["zero-shot-object-detection"], model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None, config: Optional[Union[str, PretrainedConfig]] = None, tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None, feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None, image_processor: Optional[Union[str, BaseImageProcessor]] = None, processor: Optional[Union[str, ProcessorMixin]] = None, framework: Optional[str] = None, revision: Optional[str] = None, use_fast: bool = True, token: Optional[Union[str, bool]] = None, device: Optional[Union[int, str, "torch.device"]] = None, device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None, dtype: Optional[Union[str, "torch.dtype"]] = "auto", trust_remote_code: Optional[bool] = None, model_kwargs: Optional[dict[str, Any]] = None, pipeline_class: Optional[Any] = None, **kwargs: Any) -> ZeroShotObjectDetectionPipeline: ...
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# The part of the file above was automatically generated from the code.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# fmt: on
# </generated-code>
def pipeline(
task: Optional[str] = None,
model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None,
feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
image_processor: Optional[Union[str, BaseImageProcessor]] = None,
processor: Optional[Union[str, ProcessorMixin]] = None,
framework: Optional[str] = None,
revision: Optional[str] = None,
use_fast: bool = True,
token: Optional[Union[str, bool]] = None,
device: Optional[Union[int, str, "torch.device"]] = None,
device_map: Optional[Union[str, dict[str, Union[int, str]]]] = None,
dtype: Optional[Union[str, "torch.dtype"]] = "auto",
trust_remote_code: Optional[bool] = None,
model_kwargs: Optional[dict[str, Any]] = None,
pipeline_class: Optional[Any] = None,
**kwargs: Any,
) -> Pipeline:
"""
Utility factory method to build a [`Pipeline`].
A pipeline consists of:
- One or more components for pre-processing model inputs, such as a [tokenizer](tokenizer),
[image_processor](image_processor), [feature_extractor](feature_extractor), or [processor](processors).
- A [model](model) that generates predictions from the inputs.
- Optional post-processing steps to refine the model's output, which can also be handled by processors.
<Tip>
Although the optional arguments `tokenizer`, `feature_extractor`, `image_processor`, and `processor` exist,
they usually shouldn't all be specified at once. If these components are not provided, `pipeline` will try to
load the required ones automatically. To provide these components explicitly, refer to the documentation of
the specific pipeline for details on which components it requires.
</Tip>
Args:
task (`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- `"audio-classification"`: will return a [`AudioClassificationPipeline`].
- `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
- `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
- `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
- `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
- `"fill-mask"`: will return a [`FillMaskPipeline`]:.
- `"image-classification"`: will return a [`ImageClassificationPipeline`].
- `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`].
- `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
- `"image-text-to-text"`: will return a [`ImageTextToTextPipeline`].
- `"image-to-image"`: will return a [`ImageToImagePipeline`].
- `"image-to-text"`: will return a [`ImageToTextPipeline`].
- `"mask-generation"`: will return a [`MaskGenerationPipeline`].
- `"object-detection"`: will return a [`ObjectDetectionPipeline`].
- `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
- `"summarization"`: will return a [`SummarizationPipeline`].
- `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
- `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
- `"text-classification"` (alias `"sentiment-analysis"` available): will return a
[`TextClassificationPipeline`].
- `"text-generation"`: will return a [`TextGenerationPipeline`]:.
- `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:.
- `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
- `"translation"`: will return a [`TranslationPipeline`].
- `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
- `"video-classification"`: will return a [`VideoClassificationPipeline`].
- `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
- `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
- `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
- `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
- `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].
model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
[`TFPreTrainedModel`] (for TensorFlow).
If not provided, the default for the `task` will be loaded.
config (`str` or [`PretrainedConfig`], *optional*):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].
If not provided, the default configuration file for the requested model will be used. That means that if
`model` is given, its default configuration will be used. However, if `model` is not supplied, this
`task`'s default model's config is used instead.
tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].
If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
will be loaded.
feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].
Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
models. Multi-modal models will also require a tokenizer to be passed.
If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
`model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
is a string). However, if `config` is also not given or not a string, then the default feature extractor
for the given `task` will be loaded.
image_processor (`str` or [`BaseImageProcessor`], *optional*):
The image processor that will be used by the pipeline to preprocess images for the model. This can be a
model identifier or an actual image processor inheriting from [`BaseImageProcessor`].
Image processors are used for Vision models and multi-modal models that require image inputs. Multi-modal
models will also require a tokenizer to be passed.
If not provided, the default image processor for the given `model` will be loaded (if it is a string). If
`model` is not specified or not a string, then the default image processor for `config` is loaded (if it is
a string).
processor (`str` or [`ProcessorMixin`], *optional*):
The processor that will be used by the pipeline to preprocess data for the model. This can be a model
identifier or an actual processor inheriting from [`ProcessorMixin`].
Processors are used for multi-modal models that require multi-modal inputs, for example, a model that
requires both text and image inputs.
If not provided, the default processor for the given `model` will be loaded (if it is a string). If `model`
is not specified or not a string, then the default processor for `config` is loaded (if it is a string).
framework (`str`, *optional*):
The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified and
both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
provided.
revision (`str`, *optional*, defaults to `"main"`):
When passing a task name or a string model identifier: the specific model version to use. It can be a
branch name, a tag name, or a commit id. Since models and other artifacts are stored with a git-based
system on huggingface.co, `revision` can be any identifier allowed by git.
use_fast (`bool`, *optional*, defaults to `True`):
Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
when running `hf auth login` (stored in `~/.huggingface`).
device (`int` or `str` or `torch.device`):
Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this
pipeline will be allocated.
device_map (`str` or `dict[str, Union[int, str, torch.device]]`, *optional*):
Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set
`device_map="auto"` to compute the most optimized `device_map` automatically (see
[here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload)
for more information).
<Tip warning={true}>
Do not use `device_map` AND `device` at the same time, as they will conflict.
</Tip>
dtype (`str` or `torch.dtype`, *optional*):
Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
(`torch.float16`, `torch.bfloat16`, ... or `"auto"`).
trust_remote_code (`bool`, *optional*, defaults to `False`):
Whether or not to allow for custom code defined on the Hub in their own modeling, configuration,
tokenization or even pipeline files. This option should only be set to `True` for repositories you trust
and in which you have read the code, as it will execute code present on the Hub on your local machine.
model_kwargs (`dict[str, Any]`, *optional*):
Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
**model_kwargs)` function.
kwargs (`dict[str, Any]`, *optional*):
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
[`Pipeline`]: A suitable pipeline for the task.
Examples:
```python
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> analyzer = pipeline("sentiment-analysis")
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> oracle = pipeline(
... "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased"
... )
>>> # Named entity recognition pipeline, passing in a specific model and tokenizer
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
>>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer)
```"""
if model_kwargs is None:
model_kwargs = {}
# Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs,
# this is to keep BC).
use_auth_token = model_kwargs.pop("use_auth_token", None)
if use_auth_token is not None:
warnings.warn(
"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
FutureWarning,
)
if token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
token = use_auth_token
code_revision = kwargs.pop("code_revision", None)
commit_hash = kwargs.pop("_commit_hash", None)
hub_kwargs = {
"revision": revision,
"token": token,
"trust_remote_code": trust_remote_code,
"_commit_hash": commit_hash,
}
if task is None and model is None:
raise RuntimeError(
"Impossible to instantiate a pipeline without either a task or a model "
"being specified. "
"Please provide a task class or a model"
)
if model is None and tokenizer is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer"
" may not be compatible with the default model. Please provide a PreTrainedModel class or a"
" path/identifier to a pretrained model when providing tokenizer."
)
if model is None and feature_extractor is not None:
raise RuntimeError(
"Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided"
" feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class"
" or a path/identifier to a pretrained model when providing feature_extractor."
)
if isinstance(model, Path):
model = str(model)
if commit_hash is None:
pretrained_model_name_or_path = None
if isinstance(config, str):
pretrained_model_name_or_path = config
elif config is None and isinstance(model, str):
pretrained_model_name_or_path = model
if not isinstance(config, PretrainedConfig) and pretrained_model_name_or_path is not None:
# We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
resolved_config_file = cached_file(
pretrained_model_name_or_path,
CONFIG_NAME,
_raise_exceptions_for_gated_repo=False,
_raise_exceptions_for_missing_entries=False,
_raise_exceptions_for_connection_errors=False,
cache_dir=model_kwargs.get("cache_dir"),
**hub_kwargs,
)
hub_kwargs["_commit_hash"] = extract_commit_hash(resolved_config_file, commit_hash)
else:
hub_kwargs["_commit_hash"] = getattr(config, "_commit_hash", None)
# Config is the primordial information item.
# Instantiate config if needed
adapter_path = None
if isinstance(config, str):
config = AutoConfig.from_pretrained(
config, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs
)
hub_kwargs["_commit_hash"] = config._commit_hash
elif config is None and isinstance(model, str):
# Check for an adapter file in the model path if PEFT is available
if is_peft_available():
# `find_adapter_config_file` doesn't accept `trust_remote_code`
_hub_kwargs = {k: v for k, v in hub_kwargs.items() if k != "trust_remote_code"}
maybe_adapter_path = find_adapter_config_file(
model,
token=hub_kwargs["token"],
revision=hub_kwargs["revision"],
_commit_hash=hub_kwargs["_commit_hash"],
)
if maybe_adapter_path is not None:
with open(maybe_adapter_path, "r", encoding="utf-8") as f:
adapter_config = json.load(f)
adapter_path = model
model = adapter_config["base_model_name_or_path"]
config = AutoConfig.from_pretrained(
model, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs
)
hub_kwargs["_commit_hash"] = config._commit_hash
custom_tasks = {}
if config is not None and len(getattr(config, "custom_pipelines", {})) > 0:
custom_tasks = config.custom_pipelines
if task is None and trust_remote_code is not False:
if len(custom_tasks) == 1:
task = list(custom_tasks.keys())[0]
else:
raise RuntimeError(
"We can't infer the task automatically for this model as there are multiple tasks available. Pick "
f"one in {', '.join(custom_tasks.keys())}"
)
if task is None and model is not None:
if not isinstance(model, str):
raise RuntimeError(
"Inferring the task automatically requires to check the hub with a model_id defined as a `str`. "
f"{model} is not a valid model_id."
)
task = get_task(model, token)
# Retrieve the task
if task in custom_tasks:
normalized_task = task
targeted_task, task_options = clean_custom_task(custom_tasks[task])
if pipeline_class is None:
if not trust_remote_code:
raise ValueError(
"Loading this pipeline requires you to execute the code in the pipeline file in that"
" repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
" set the option `trust_remote_code=True` to remove this error."
)
class_ref = targeted_task["impl"]
pipeline_class = get_class_from_dynamic_module(
class_ref,
model,
code_revision=code_revision,
**hub_kwargs,
)
else:
normalized_task, targeted_task, task_options = check_task(task)
if pipeline_class is None:
pipeline_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
# At that point framework might still be undetermined
model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options)
revision = revision if revision is not None else default_revision
logger.warning(
f"No model was supplied, defaulted to {model} and revision"
f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n"
"Using a pipeline without specifying a model name and revision in production is not recommended."
)
hub_kwargs["revision"] = revision
if config is None and isinstance(model, str):
config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
hub_kwargs["_commit_hash"] = config._commit_hash
if device_map is not None:
if "device_map" in model_kwargs:
raise ValueError(
'You cannot pass both `pipeline(..., device_map=...)` and `model_kwargs={"device_map": ...}` as those'
" arguments might conflict; use only one."
)
if device is not None:
logger.warning(
"Both `device` and `device_map` are specified. `device` will override `device_map`. You"
" will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`."
)
model_kwargs["device_map"] = device_map
# BC for the `torch_dtype` argument
if (torch_dtype := kwargs.get("torch_dtype")) is not None:
logger.warning_once("`torch_dtype` is deprecated! Use `dtype` instead!")
# If both are provided, keep `dtype`
dtype = torch_dtype if dtype == "auto" else dtype
if "torch_dtype" in model_kwargs or "dtype" in model_kwargs:
if "torch_dtype" in model_kwargs:
logger.warning_once("`torch_dtype` is deprecated! Use `dtype` instead!")
# If the user did not explicitly provide `dtype` (i.e. the function default "auto" is still
# present) but a value is supplied inside `model_kwargs`, we silently defer to the latter instead of
# raising. This prevents false positives like providing `dtype` only via `model_kwargs` while the
# top-level argument keeps its default value "auto".
if dtype == "auto":
dtype = None
else:
raise ValueError(
'You cannot pass both `pipeline(..., dtype=...)` and `model_kwargs={"dtype": ...}` as those'
" arguments might conflict; use only one."
)
if dtype is not None:
if isinstance(dtype, str) and hasattr(torch, dtype):
dtype = getattr(torch, dtype)
model_kwargs["dtype"] = dtype
model_name = model if isinstance(model, str) else None
# Load the correct model if possible
# Infer the framework from the model if not already defined
if isinstance(model, str) or framework is None:
model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
framework, model = infer_framework_load_model(
adapter_path if adapter_path is not None else model,
model_classes=model_classes,
config=config,
framework=framework,
task=task,
**hub_kwargs,
**model_kwargs,
)
hub_kwargs["_commit_hash"] = model.config._commit_hash
# Check which preprocessing classes the pipeline uses
# None values indicate optional classes that the pipeline can run without, we don't raise errors if loading fails
load_tokenizer = pipeline_class._load_tokenizer
load_feature_extractor = pipeline_class._load_feature_extractor
load_image_processor = pipeline_class._load_image_processor
load_processor = pipeline_class._load_processor
if load_tokenizer or load_tokenizer is None:
try:
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model_name, str):
tokenizer = model_name
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess what is the right tokenizer here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
use_fast = tokenizer[1].pop("use_fast", use_fast)
tokenizer_identifier = tokenizer[0]
tokenizer_kwargs = tokenizer[1]
else:
tokenizer_identifier = tokenizer
tokenizer_kwargs = model_kwargs.copy()
tokenizer_kwargs.pop("torch_dtype", None)
tokenizer_kwargs.pop("dtype", None)
tokenizer = AutoTokenizer.from_pretrained(
tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
)
except Exception as e:
if load_tokenizer:
raise e
else:
tokenizer = None
if load_image_processor or load_image_processor is None:
try:
# Try to infer image processor from model or config name (if provided as str)
if image_processor is None:
if isinstance(model_name, str):
image_processor = model_name
elif isinstance(config, str):
image_processor = config
# Backward compatibility, as `feature_extractor` used to be the name
# for `ImageProcessor`.
elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor):
image_processor = feature_extractor
else:
# Impossible to guess what is the right image_processor here
raise Exception(
"Impossible to guess which image processor to use. "
"Please provide a PreTrainedImageProcessor class or a path/identifier "
"to a pretrained image processor."
)
# Instantiate image_processor if needed
if isinstance(image_processor, (str, tuple)):
image_processor = AutoImageProcessor.from_pretrained(
image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs
)
except Exception as e:
if load_image_processor:
raise e
else:
image_processor = None
if load_feature_extractor or load_feature_extractor is None:
try:
# Try to infer feature extractor from model or config name (if provided as str)
if feature_extractor is None:
if isinstance(model_name, str):
feature_extractor = model_name
elif isinstance(config, str):
feature_extractor = config
else:
# Impossible to guess what is the right feature_extractor here
raise Exception(
"Impossible to guess which feature extractor to use. "
"Please provide a PreTrainedFeatureExtractor class or a path/identifier "
"to a pretrained feature extractor."
)
# Instantiate feature_extractor if needed
if isinstance(feature_extractor, (str, tuple)):
feature_extractor = AutoFeatureExtractor.from_pretrained(
feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs
)
if (
feature_extractor._processor_class
and feature_extractor._processor_class.endswith("WithLM")
and isinstance(model_name, str)
):
try:
import kenlm # to trigger `ImportError` if not installed
from pyctcdecode import BeamSearchDecoderCTC
if os.path.isdir(model_name) or os.path.isfile(model_name):
decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
else:
language_model_glob = os.path.join(
BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*"
)
alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
allow_patterns = [language_model_glob, alphabet_filename]
decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns)
kwargs["decoder"] = decoder
except ImportError as e:
logger.warning(
f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}"
)
if not is_kenlm_available():
logger.warning("Try to install `kenlm`: `pip install kenlm")
if not is_pyctcdecode_available():
logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode")
except Exception as e:
if load_feature_extractor:
raise e
else:
feature_extractor = None
if load_processor or load_processor is None:
try:
# Try to infer processor from model or config name (if provided as str)
if processor is None:
if isinstance(model_name, str):
processor = model_name
elif isinstance(config, str):
processor = config
else:
# Impossible to guess what is the right processor here
raise Exception(
"Impossible to guess which processor to use. "
"Please provide a processor instance or a path/identifier "
"to a processor."
)
# Instantiate processor if needed
if isinstance(processor, (str, tuple)):
processor = AutoProcessor.from_pretrained(processor, _from_pipeline=task, **hub_kwargs, **model_kwargs)
if not isinstance(processor, ProcessorMixin):
raise TypeError(
"Processor was loaded, but it is not an instance of `ProcessorMixin`. "
f"Got type `{type(processor)}` instead. Please check that you specified "
"correct pipeline task for the model and model has processor implemented and saved."
)
except Exception as e:
if load_processor:
raise e
else:
processor = None
if task == "translation" and model.config.task_specific_params:
for key in model.config.task_specific_params:
if key.startswith("translation"):
task = key
warnings.warn(
f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
UserWarning,
)
break
if tokenizer is not None:
kwargs["tokenizer"] = tokenizer
if feature_extractor is not None:
kwargs["feature_extractor"] = feature_extractor
if dtype is not None:
kwargs["dtype"] = dtype
if image_processor is not None:
kwargs["image_processor"] = image_processor
if device is not None:
kwargs["device"] = device
if processor is not None:
kwargs["processor"] = processor
return pipeline_class(model=model, framework=framework, task=task, **kwargs)
| transformers/src/transformers/pipelines/__init__.py/0 | {
"file_path": "transformers/src/transformers/pipelines/__init__.py",
"repo_id": "transformers",
"token_count": 32668
} | 523 |
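The factory above wires `device`, `device_map`, `dtype`, and `model_kwargs` together before delegating to the task-specific pipeline class. As a minimal, hypothetical sketch of a call that respects those constraints (the checkpoint name is only an illustrative assumption, not something prescribed by the source):
```python
from transformers import pipeline

# `dtype` and `device_map` are forwarded to the model's `from_pretrained` call,
# so they must not also appear inside `model_kwargs` (the factory raises otherwise).
generator = pipeline(
    "text-generation",
    model="openai-community/gpt2",  # assumed checkpoint, replace with your own
    dtype="auto",                   # or torch.float16 / torch.bfloat16
    device_map="auto",              # needs `accelerate`; do not combine with `device`
)
print(generator("Hello, world", max_new_tokens=5)[0]["generated_text"])
```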
from typing import TYPE_CHECKING, Any, Union, overload
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import Pipeline, build_pipeline_init_args
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import (
MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
)
if TYPE_CHECKING:
from PIL import Image
logger = logging.get_logger(__name__)
@add_end_docstrings(build_pipeline_init_args(has_image_processor=True))
class ObjectDetectionPipeline(Pipeline):
"""
Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of objects
and their classes.
Example:
```python
>>> from transformers import pipeline
>>> detector = pipeline(model="facebook/detr-resnet-50")
>>> detector("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
[{'score': 0.997, 'label': 'bird', 'box': {'xmin': 69, 'ymin': 171, 'xmax': 396, 'ymax': 507}}, {'score': 0.999, 'label': 'bird', 'box': {'xmin': 398, 'ymin': 105, 'xmax': 767, 'ymax': 507}}]
>>> # x, y are expressed relative to the top left hand corner.
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial)
This object detection pipeline can currently be loaded from [`pipeline`] using the following task identifier:
`"object-detection"`.
See the list of available models on [huggingface.co/models](https://huggingface.co/models?filter=object-detection).
"""
_load_processor = False
_load_image_processor = True
_load_feature_extractor = False
_load_tokenizer = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch.")
requires_backends(self, "vision")
mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES.copy()
mapping.update(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES)
self.check_model_type(mapping)
def _sanitize_parameters(self, **kwargs):
preprocess_params = {}
if "timeout" in kwargs:
preprocess_params["timeout"] = kwargs["timeout"]
postprocess_kwargs = {}
if "threshold" in kwargs:
postprocess_kwargs["threshold"] = kwargs["threshold"]
return preprocess_params, {}, postprocess_kwargs
@overload
def __call__(self, image: Union[str, "Image.Image"], *args: Any, **kwargs: Any) -> list[dict[str, Any]]: ...
@overload
def __call__(
self, image: Union[list[str], list["Image.Image"]], *args: Any, **kwargs: Any
) -> list[list[dict[str, Any]]]: ...
def __call__(self, *args, **kwargs) -> Union[list[dict[str, Any]], list[list[dict[str, Any]]]]:
"""
Detect objects (bounding boxes & classes) in the image(s) passed as inputs.
Args:
inputs (`str`, `list[str]`, `PIL.Image` or `list[PIL.Image]`):
The pipeline handles three types of images:
- A string containing an HTTP(S) link pointing to an image
- A string containing a local path to an image
- An image loaded in PIL directly
The pipeline accepts either a single image or a batch of images. Images in a batch must all be in the
same format: all as HTTP(S) links, all as local paths, or all as PIL images.
threshold (`float`, *optional*, defaults to 0.5):
The probability necessary to make a prediction.
timeout (`float`, *optional*, defaults to None):
The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
the call may block forever.
Return:
A list of dictionaries or a list of list of dictionaries containing the result. If the input is a single
image, will return a list of dictionaries, if the input is a list of several images, will return a list of
list of dictionaries corresponding to each image.
The dictionaries contain the following keys:
- **label** (`str`) -- The class label identified by the model.
- **score** (`float`) -- The score attributed by the model for that label.
- **box** (`dict[str, int]`) -- The bounding box of the detected object in the image's original size.
"""
# After deprecation of this is completed, remove the default `None` value for `images`
if "images" in kwargs and "inputs" not in kwargs:
kwargs["inputs"] = kwargs.pop("images")
return super().__call__(*args, **kwargs)
def preprocess(self, image, timeout=None):
image = load_image(image, timeout=timeout)
target_size = torch.IntTensor([[image.height, image.width]])
inputs = self.image_processor(images=[image], return_tensors="pt")
if self.framework == "pt":
inputs = inputs.to(self.dtype)
if self.tokenizer is not None:
inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
inputs["target_size"] = target_size
return inputs
def _forward(self, model_inputs):
target_size = model_inputs.pop("target_size")
outputs = self.model(**model_inputs)
model_outputs = outputs.__class__({"target_size": target_size, **outputs})
if self.tokenizer is not None:
model_outputs["bbox"] = model_inputs["bbox"]
return model_outputs
def postprocess(self, model_outputs, threshold=0.5):
target_size = model_outputs["target_size"]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
height, width = target_size[0].tolist()
def unnormalize(bbox):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
]
)
)
scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
keys = ["score", "label", "box"]
annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
raw_annotation = raw_annotations[0]
scores = raw_annotation["scores"]
labels = raw_annotation["labels"]
boxes = raw_annotation["boxes"]
raw_annotation["scores"] = scores.tolist()
raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
keys = ["score", "label", "box"]
annotation = [
dict(zip(keys, vals))
for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
]
return annotation
def _get_bounding_box(self, box: "torch.Tensor") -> dict[str, int]:
"""
Turns list [xmin, ymin, xmax, ymax] into dict { "xmin": xmin, ... }
Args:
box (`torch.Tensor`): Tensor containing the coordinates in corners format.
Returns:
bbox (`dict[str, int]`): Dict containing the coordinates in corners format.
"""
if self.framework != "pt":
raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
xmin, ymin, xmax, ymax = box.int().tolist()
bbox = {
"xmin": xmin,
"ymin": ymin,
"xmax": xmax,
"ymax": ymax,
}
return bbox
| transformers/src/transformers/pipelines/object_detection.py/0 | {
"file_path": "transformers/src/transformers/pipelines/object_detection.py",
"repo_id": "transformers",
"token_count": 3697
} | 524 |
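As a usage sketch for the pipeline defined above (the checkpoint and image URL come from the class docstring; the `threshold` value is only an illustrative assumption):
```python
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
results = detector(
    "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png",
    threshold=0.9,  # forwarded to `postprocess` via `_sanitize_parameters`
)
for prediction in results:
    box = prediction["box"]  # {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...} in pixels
    print(prediction["label"], round(prediction["score"], 3), box)
```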
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import TYPE_CHECKING
from packaging import version
from .base import HfQuantizer
if TYPE_CHECKING:
from ..modeling_utils import PreTrainedModel
from ..utils import is_auto_gptq_available, is_gptqmodel_available, is_optimum_available, is_torch_available, logging
from ..utils.quantization_config import GPTQConfig, QuantizationConfigMixin
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class GptqHfQuantizer(HfQuantizer):
"""
Quantizer for the GPTQ method - the quantizer supports calibration of the model through the
`auto_gptq` or `gptqmodel` package. Quantization is done under the hood for users if they load a non-prequantized model.
"""
requires_calibration = False
required_packages = ["optimum", "auto_gptq", "gptqmodel"]
optimum_quantizer = None
def __init__(self, quantization_config: QuantizationConfigMixin, **kwargs):
super().__init__(quantization_config, **kwargs)
if not is_optimum_available():
raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
from optimum.gptq import GPTQQuantizer
self.optimum_quantizer = GPTQQuantizer.from_dict(self.quantization_config.to_dict_optimum())
def validate_environment(self, *args, **kwargs):
if not is_optimum_available():
raise ImportError("Loading a GPTQ quantized model requires optimum (`pip install optimum`)")
if is_auto_gptq_available() and is_gptqmodel_available():
logger.warning("Detected gptqmodel and auto-gptq, will use gptqmodel")
gptq_supports_cpu = (
is_auto_gptq_available()
and version.parse(importlib.metadata.version("auto-gptq")) > version.parse("0.4.2")
) or is_gptqmodel_available()
if not gptq_supports_cpu and not torch.cuda.is_available():
raise RuntimeError("GPU is required to quantize or run quantize model.")
elif not (is_auto_gptq_available() or is_gptqmodel_available()):
raise ImportError(
"Loading a GPTQ quantized model requires gptqmodel (`pip install gptqmodel`) or auto-gptq (`pip install auto-gptq`) library. "
)
elif is_auto_gptq_available() and version.parse(importlib.metadata.version("auto_gptq")) < version.parse(
"0.4.2"
):
raise ImportError(
"You need a version of auto_gptq >= 0.4.2 to use GPTQ: `pip install --upgrade auto-gptq` or use gptqmodel by `pip install gptqmodel>=1.4.3`."
)
elif is_gptqmodel_available() and (
version.parse(importlib.metadata.version("gptqmodel")) < version.parse("1.4.3")
or version.parse(importlib.metadata.version("optimum")) < version.parse("1.23.99")
):
raise ImportError("The gptqmodel version should be >= 1.4.3, optimum version should >= 1.24.0")
def update_dtype(self, dtype: "torch.dtype") -> "torch.dtype":
if dtype is None:
dtype = torch.float16
logger.info("Loading the model in `torch.float16`. To overwrite it, set `dtype` manually.")
elif dtype != torch.float16:
logger.info("We suggest you to set `dtype=torch.float16` for better efficiency with GPTQ.")
return dtype
def update_device_map(self, device_map):
if device_map is None:
device_map = {"": torch.device("cpu")}
# Only auto-gptq does not support CPU, so move the model to CUDA if one is available.
if not is_gptqmodel_available() and device_map in ("cpu", {"": torch.device("cpu")}):
device_map = {"": 0}
return device_map
def _process_model_before_weight_loading(self, model: "PreTrainedModel", **kwargs):
if model.__class__.main_input_name != "input_ids":
raise RuntimeError("We can only quantize pure text model.")
if self.pre_quantized:
# compat: latest optimum has gptqmodel refactor
if version.parse(importlib.metadata.version("optimum")) <= version.parse("1.23.99"):
model = self.optimum_quantizer.convert_model(model)
else:
model = self.optimum_quantizer.convert_model(model, **kwargs)
def _process_model_after_weight_loading(self, model: "PreTrainedModel", **kwargs):
if self.pre_quantized:
model = self.optimum_quantizer.post_init_model(model)
else:
if self.quantization_config.tokenizer is None:
self.quantization_config.tokenizer = model.name_or_path
self.optimum_quantizer.quantize_model(model, self.quantization_config.tokenizer)
model.config.quantization_config = GPTQConfig.from_dict(self.optimum_quantizer.to_dict())
@property
def is_trainable(self) -> bool:
return True
def is_serializable(self, safe_serialization=None):
return True
| transformers/src/transformers/quantizers/quantizer_gptq.py/0 | {
"file_path": "transformers/src/transformers/quantizers/quantizer_gptq.py",
"repo_id": "transformers",
"token_count": 2194
} | 525 |
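Since `requires_calibration` is `False`, the quantizer above can calibrate a non-prequantized model while it is being loaded. A hedged sketch of that path via `GPTQConfig` (the model id and dataset choice are assumptions for illustration):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"  # assumed base checkpoint, replace with your own
tokenizer = AutoTokenizer.from_pretrained(model_id)

# On-the-fly quantization: the quantizer calibrates with the given dataset during loading.
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=gptq_config,
    device_map="auto",  # a GPU is strongly recommended for calibration
)
```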
# Copyright 2023 The HuggingFace Inc. team.
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Time series distributional output classes and utilities.
"""
from typing import Callable, Optional
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
self.scale = 1.0 if scale is None else scale
self.loc = 0.0 if loc is None else loc
super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
@property
def mean(self):
"""
Returns the mean of the distribution.
"""
return self.base_dist.mean * self.scale + self.loc
@property
def variance(self):
"""
Returns the variance of the distribution.
"""
return self.base_dist.variance * self.scale**2
@property
def stddev(self):
"""
Returns the standard deviation of the distribution.
"""
return self.variance.sqrt()
class ParameterProjection(nn.Module):
def __init__(
self, in_features: int, args_dim: dict[str, int], domain_map: Callable[..., tuple[torch.Tensor]], **kwargs
) -> None:
super().__init__(**kwargs)
self.args_dim = args_dim
self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
self.domain_map = domain_map
def forward(self, x: torch.Tensor) -> tuple[torch.Tensor]:
params_unbounded = [proj(x) for proj in self.proj]
return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
def __init__(self, function):
super().__init__()
self.function = function
def forward(self, x, *args):
return self.function(x, *args)
class DistributionOutput:
distribution_class: type
in_features: int
args_dim: dict[str, int]
def __init__(self, dim: int = 1) -> None:
self.dim = dim
self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
def _base_distribution(self, distr_args):
if self.dim == 1:
return self.distribution_class(*distr_args)
else:
return Independent(self.distribution_class(*distr_args), 1)
def distribution(
self,
distr_args,
loc: Optional[torch.Tensor] = None,
scale: Optional[torch.Tensor] = None,
) -> Distribution:
distr = self._base_distribution(distr_args)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)
@property
def event_shape(self) -> tuple:
r"""
Shape of each individual event contemplated by the distributions that this object constructs.
"""
return () if self.dim == 1 else (self.dim,)
@property
def event_dim(self) -> int:
r"""
Number of event dimensions, i.e., length of the `event_shape` tuple, of the distributions that this object
constructs.
"""
return len(self.event_shape)
@property
def value_in_support(self) -> float:
r"""
A float that will have a valid numeric value when computing the log-loss of the corresponding distribution. By
default 0.0. This value will be used when padding data series.
"""
return 0.0
def get_parameter_projection(self, in_features: int) -> nn.Module:
r"""
Return the parameter projection layer that maps the input to the appropriate parameters of the distribution.
"""
return ParameterProjection(
in_features=in_features,
args_dim=self.args_dim,
domain_map=LambdaLayer(self.domain_map),
)
def domain_map(self, *args: torch.Tensor):
r"""
Converts arguments to the right shape and domain. The domain depends on the type of distribution, while the
correct shape is obtained by reshaping the trailing axis in such a way that the returned tensors define a
distribution of the right event_shape.
"""
raise NotImplementedError()
@staticmethod
def squareplus(x: torch.Tensor) -> torch.Tensor:
r"""
Helper to map inputs to the positive orthant by applying the square-plus operation. Reference:
https://twitter.com/jon_barron/status/1387167648669048833
"""
return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
"""
Student-T distribution output class.
"""
args_dim: dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
distribution_class: type = StudentT
@classmethod
def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
df = 2.0 + cls.squareplus(df)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
"""
Normal distribution output class.
"""
args_dim: dict[str, int] = {"loc": 1, "scale": 1}
distribution_class: type = Normal
@classmethod
def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
"""
Negative Binomial distribution output class.
"""
args_dim: dict[str, int] = {"total_count": 1, "logits": 1}
distribution_class: type = NegativeBinomial
@classmethod
def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
total_count = cls.squareplus(total_count)
return total_count.squeeze(-1), logits.squeeze(-1)
def _base_distribution(self, distr_args) -> Distribution:
total_count, logits = distr_args
if self.dim == 1:
return self.distribution_class(total_count=total_count, logits=logits)
else:
return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)
# Overwrites the parent class method. We cannot scale using the affine
# transformation since negative binomial should return integers. Instead
# we scale the parameters.
def distribution(
self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
) -> Distribution:
total_count, logits = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits))
| transformers/src/transformers/time_series_utils.py/0 | {
"file_path": "transformers/src/transformers/time_series_utils.py",
"repo_id": "transformers",
"token_count": 2896
} | 526 |
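To illustrate how a `DistributionOutput` subclass ties the pieces above together (the projection layer, `domain_map`, and `distribution`), here is a minimal sketch with made-up tensor shapes:
```python
import torch

from transformers.time_series_utils import StudentTOutput

batch, seq_len, hidden = 2, 5, 16
features = torch.randn(batch, seq_len, hidden)

output = StudentTOutput(dim=1)
projection = output.get_parameter_projection(in_features=hidden)
distr_args = projection(features)        # (df, loc, scale), each of shape (batch, seq_len)
distribution = output.distribution(distr_args)

sample = distribution.sample()           # shape: (batch, seq_len)
log_prob = distribution.log_prob(torch.zeros(batch, seq_len))
```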