defaults:
  - benchmark # inheriting benchmark schema
  - scenario: inference
  - launcher: process
  - backend: pytorch
  - _self_ # for hydra 1.1 compatibility

name: pytorch_generate

launcher:
  start_method: spawn
  device_isolation: true
  device_isolation_action: warn

backend:
  device: cuda
  device_ids: 0
  no_weights: true
  model: meta-llama/Llama-2-7b-hf
  cache_implementation: static
  torch_compile: true
  torch_dtype: float16
  torch_compile_config:
    backend: inductor
    mode: reduce-overhead
    fullgraph: true

scenario:
  input_shapes:
    batch_size: 1
    sequence_length: 7
  generate_kwargs:
    max_new_tokens: 128
    min_new_tokens: 128
    do_sample: false
  memory: true
  latency: true
  iterations: 2
  duration: 0

# hydra/cli specific settings
hydra:
  run:
    # where to store run results
    dir: runs/${name}
  job:
    # change working directory to the run directory
    chdir: true
    env_set:
      # set environment variable OVERRIDE_BENCHMARKS to 1
      # to not skip benchmarks that have been run before
      OVERRIDE_BENCHMARKS: 1
      LOG_LEVEL: WARN
  sweep:
    dir: multirun
    subdir: ${hydra.job.override_dirname}
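For reference, a minimal sketch of how a Hydra config like this one can be composed and overridden programmatically; the config directory and config name below are assumptions matching this file's location, and the overrides use Hydra's standard dotted syntax:

```python
# Sketch: compose the benchmark config with Hydra's compose API and inspect it.
# Assumes this file lives at config/generation.yaml relative to the script.
from hydra import compose, initialize
from omegaconf import OmegaConf

with initialize(version_base=None, config_path="config"):
    cfg = compose(
        config_name="generation",
        overrides=[
            "backend.model=meta-llama/Llama-2-7b-hf",
            "scenario.generate_kwargs.max_new_tokens=64",
        ],
    )

# Print the fully resolved configuration as YAML
print(OmegaConf.to_yaml(cfg))
```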
transformers/benchmark/config/generation.yaml/0
{ "file_path": "transformers/benchmark/config/generation.yaml", "repo_id": "transformers", "token_count": 451 }
261
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

# Use login shell to read variables from `~/.profile` (to pass dynamically created variables between RUN commands)
SHELL ["sh", "-lc"]

# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
# to be used as arguments for docker build (so far).
ARG PYTORCH='2.4.0'
# (not always a valid torch version)
ARG INTEL_TORCH_EXT='2.3.0'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu121'

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs
RUN git lfs install
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

# 1. Put several commands in a single `RUN` to avoid image/layer exporting issue. Could be revised in the future.
# 2. Regarding the `torch` part, we might need to specify proper versions for `torchvision` and `torchaudio`.
#    Currently, let's not bother to specify their versions explicitly (so installed with their latest release versions).
RUN python3 -m pip install --no-cache-dir -U tensorflow==2.13 protobuf==3.20.3 tensorflow_text tensorflow_probability && python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] && [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile && echo torch=$VERSION && [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA

RUN python3 -m pip uninstall -y flax jax

RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT -f https://developer.intel.com/ipex-whl-stable-cpu

RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
RUN python3 -m pip install -U "itsdangerous<2.1.0"

RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/peft@main#egg=peft

# For bettertransformer
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum

# For video model testing
RUN python3 -m pip install --no-cache-dir decord av==9.2.0

# Some slow tests require bnb
RUN python3 -m pip install --no-cache-dir bitsandbytes

# Some tests require quanto
RUN python3 -m pip install --no-cache-dir quanto

# `quanto` will install `ninja` which leads to many `CUDA error: an illegal memory access ...` in some model tests
# (`deformable_detr`, `rwkv`, `mra`)
RUN python3 -m pip uninstall -y ninja

# For `dinat` model
# The `XXX` part in `torchXXX` needs to match `PYTORCH` (to some extent)
RUN python3 -m pip install --no-cache-dir natten==0.15.1+torch220$CUDA -f https://shi-labs.com/natten/wheels

# For `nougat` tokenizer
RUN python3 -m pip install --no-cache-dir python-Levenshtein

# For `FastSpeech2ConformerTokenizer` tokenizer
RUN python3 -m pip install --no-cache-dir g2p-en

# When installing in editable mode, `transformers` is not recognized as a package.
# This line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
transformers/docker/transformers-all-latest-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-all-latest-gpu/Dockerfile", "repo_id": "transformers", "token_count": 1223 }
262
### Translating the Transformers documentation into your language

As part of our mission to democratize machine learning, we'd love to make the Transformers library available in many more languages! Follow the steps below if you want to help translate the documentation into your language 🙏.

**🗞️ Open an issue**

To get started, navigate to the [Issues](https://github.com/huggingface/transformers/issues) page of this repo and check if anyone else has opened an issue for your language. If not, open a new issue by selecting the "Translation template" from the "New issue" button.

Once an issue exists, post a comment to indicate which chapters you'd like to work on, and we'll add your name to the list.

**🍴 Fork the repository**

First, you'll need to [fork the Transformers repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo). You can do this by clicking on the **Fork** button on the top-right corner of this repo's page.

Once you've forked the repo, you'll want to get the files on your local machine for editing. You can do that by cloning the fork with Git as follows:

```bash
git clone https://github.com/YOUR-USERNAME/transformers.git
```

**📋 Copy-paste the English version with a new language code**

The documentation files are all in one main directory:

- [`docs/source`](https://github.com/huggingface/transformers/tree/main/docs/source): All the documentation materials are organized here by language.

You'll only need to copy the files in the [`docs/source/en`](https://github.com/huggingface/transformers/tree/main/docs/source/en) directory, so first navigate to your fork of the repo and run the following:

```bash
cd ~/path/to/transformers/docs
cp -r source/en source/LANG-ID
```

Here, `LANG-ID` should be one of the ISO 639-1 or ISO 639-2 language codes -- see [here](https://www.loc.gov/standards/iso639-2/php/code_list.php) for a handy table.

**✍️ Start translating**

The fun part comes now - translating the text! The first thing we recommend is translating the part of the `_toctree.yml` file that corresponds to your doc chapter. This file is used to render the table of contents on the website.

> 🙋 If the `_toctree.yml` file doesn't yet exist for your language, you can create one by copy-pasting from the English version and deleting the sections unrelated to your chapter. Just make sure it exists in the `docs/source/LANG-ID/` directory!

The fields you should add are `local` (with the name of the file containing the translation; e.g. `autoclass_tutorial`), and `title` (with the title of the doc in your language; e.g. `Load pretrained instances with an AutoClass`) -- as a reference, here is the `_toctree.yml` for [English](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml):

```yaml
- sections:
  - local: pipeline_tutorial # Do not change this! Use the same name for your .md file
    title: Pipelines for inference # Translate this!
    ...
  title: Tutorials # Translate this!
```

Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.

> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/transformers/issues) and tag @stevhliu.
transformers/docs/TRANSLATING.md/0
{ "file_path": "transformers/docs/TRANSLATING.md", "repo_id": "transformers", "token_count": 942 }
263
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Preprocessing

[[open-in-colab]]

Before you can use your data in a model, it needs to be brought into a format the model accepts. A model does not understand raw text, images, or audio. These inputs have to be converted into numbers and assembled into tensors. In this guide, you will:

* Preprocess text data with a tokenizer.
* Preprocess image or audio data with a feature extractor.
* Preprocess data for a multimodal task with a processor.

## NLP

<Youtube id="Yffk5aydLzg"/>

The main tool for processing text data is a [tokenizer](main_classes/tokenizer). A tokenizer first splits text into *tokens* according to a set of rules. The tokens are then converted into numbers, which are used to build tensors as input to a model. Any additional inputs a model requires are also added by the tokenizer.

<Tip>

If you plan to use a pretrained model, it's important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and that the same corresponding token-to-index mapping (usually referred to as the *vocab*) is used as during pretraining.

</Tip>

Load a pretrained tokenizer with the [`AutoTokenizer`] class to get started quickly. This downloads the *vocab* that was used when the model was pretrained.

### Tokenize

Load a pretrained tokenizer with [`AutoTokenizer.from_pretrained`]:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
```

Then pass your sentence to the tokenizer:

```py
>>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.")
>>> print(encoded_input)
{'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary with three important items:

* [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence.
* [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not.
* [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence.
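If you're curious which tokens those indices actually correspond to, `convert_ids_to_tokens` maps the ids back to token strings (a small sanity check; this call isn't part of the original guide, but it is part of the standard tokenizer API):

```py
>>> # Map the ids back to tokens to see the special tokens the tokenizer added
>>> tokenizer.convert_ids_to_tokens(encoded_input["input_ids"])
```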
You can decode the `input_ids` to return the original input:

```py
>>> tokenizer.decode(encoded_input["input_ids"])
'[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]'
```

As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer automatically adds them for you.

If you want to process several sentences, pass them to the tokenizer as a list:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_inputs = tokenizer(batch_sentences)
>>> print(encoded_inputs)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1]]}
```

### Pad

This brings us to an important topic. When you process a batch of sentences, they aren't always the same length. This is a problem because tensors, the model's input, need to have a uniform shape. Padding is a strategy that makes tensors rectangular by adding a special *padding token* to sentences with fewer tokens.

Set the `padding` parameter to `True` to pad the shorter sequences in the batch so they match the longest one:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```

Notice that the tokenizer padded the first and third sentences with a `0` because they are shorter!

### Truncation

On the other end of the spectrum, sometimes a sequence may be too long for a model. In this case, you need to truncate the sequence to a shorter length.

Set the `truncation` parameter to `True` to truncate a sequence to the maximum length accepted by the model:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```

### Build tensors

Finally, you want the tokenizer to return the actual tensors that get fed to the model. Set the `return_tensors` parameter to either `pt` for PyTorch, or `tf` for TensorFlow:

<frameworkcontent>
<pt>

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt")
>>> print(encoded_input)
{'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
                      [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
                      [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]),
 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]),
 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                           [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])}
```

</pt>
<tf>

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf")
>>> print(encoded_input)
{'input_ids': <tf.Tensor: shape=(3, 15), dtype=int32, numpy=
array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
       [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
       [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>,
 'token_type_ids': <tf.Tensor: shape=(3, 15), dtype=int32, numpy=
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>,
 'attention_mask': <tf.Tensor: shape=(3, 15), dtype=int32, numpy=
array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
       [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>}
```

</tf>
</frameworkcontent>

## Audio

Audio inputs are preprocessed differently than text inputs, but the end goal remains the same: create numerical sequences the model can understand. A [feature extractor](main_classes/feature_extractor) is designed for the express purpose of extracting features from raw image or audio data and converting them into tensors.
Before you begin, install 🤗 Datasets to load an audio dataset to experiment with:

```bash
pip install datasets
```

Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset):

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
```

Access the first element of the `audio` column to take a look at the input. Calling the `audio` column automatically loads and resamples the audio file:

```py
>>> dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
         0.        ,  0.        ], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 8000}
```

This returns three items:

* `array` is the speech signal loaded - and potentially resampled - as a 1D array.
* `path` points to the location of the audio file.
* `sampling_rate` refers to how many data points in the speech signal are measured per second.

### Resample

For this tutorial, you'll use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. As you can see from the model card, the Wav2Vec2 model is pretrained on speech audio sampled at 16kHz. It is important that your audio data's sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data's sampling rate isn't the same, you need to resample your audio data.

For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has a sampling rate of 8kHz. To use the Wav2Vec2 model with this dataset, you need to upsample it to 16kHz:

```py
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
         0.        ,  0.        ], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 8000}
```

1. Use 🤗 Datasets' [`~datasets.Dataset.cast_column`] method to upsample the sampling rate to 16kHz:

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
```

2. Load the audio file:

```py
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
         3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 16000}
```

As you can see, the `sampling_rate` is now 16kHz!

### Feature extractor

The next step is to load a feature extractor to normalize and pad the input. When padding text data, a `0` is added for shorter sequences. The same idea applies to audio data: the audio feature extractor adds a `0` - interpreted as silence - to `array`.
Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]:

```py
>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
```

Pass the audio `array` to the feature extractor. We also recommend adding the `sampling_rate` argument in the feature extractor in order to better debug any silent errors that may occur.

```py
>>> audio_input = [dataset[0]["audio"]["array"]]
>>> feature_extractor(audio_input, sampling_rate=16000)
{'input_values': [array([ 3.8106556e-04,  2.7506407e-03,  2.8015103e-03, ...,
        5.6335266e-04,  4.6588284e-06, -1.7142107e-04], dtype=float32)]}
```

### Pad and truncate

Just like the tokenizer, you can handle variable-length sequences in a batch by padding or truncating. Take a look at the sequence length of these two audio samples:

```py
>>> dataset[0]["audio"]["array"].shape
(173398,)

>>> dataset[1]["audio"]["array"].shape
(106496,)
```

As you can see, the first sample has a longer sequence than the second sample. Let's create a function that preprocesses the dataset. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it:

```py
>>> def preprocess_function(examples):
...     audio_arrays = [x["array"] for x in examples["audio"]]
...     inputs = feature_extractor(
...         audio_arrays,
...         sampling_rate=16000,
...         padding=True,
...         max_length=100000,
...         truncation=True,
...     )
...     return inputs
```

Apply the function to the first few examples in the dataset:

```py
>>> processed_dataset = preprocess_function(dataset[:5])
```

Now take another look at the processed sample lengths:

```py
>>> processed_dataset["input_values"][0].shape
(100000,)

>>> processed_dataset["input_values"][1].shape
(100000,)
```

The lengths of the first two samples now match the maximum length you specified.

## Computer vision

A feature extractor is also used to process images for computer vision tasks. Once again, the goal is to convert the raw image into a batch of tensors as input.

Let's load the [food101](https://huggingface.co/datasets/food101) dataset for this tutorial. Use the 🤗 Datasets `split` parameter to only load a small sample from the training split, since the dataset is quite large:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("food101", split="train[:100]")
```

Next, take a look at the image with the 🤗 Datasets [Image](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature:

```py
>>> dataset[0]["image"]
```

![vision-preprocess-tutorial.png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png)

### Feature extractor

Load the image processor with [`AutoImageProcessor.from_pretrained`]:

```py
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```

### Data augmentation

For computer vision tasks, it is common to add some type of data augmentation to the images as part of preprocessing. You can add augmentations with any library you like, but in this tutorial you'll use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module.
1. Normalize the image and use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain together some transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html):

```py
>>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor

>>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
>>> _transforms = Compose(
...     [RandomResizedCrop(image_processor.size["height"]), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize]
... )
```

2. The model accepts [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input. This value is generated by the image processor. Create a function that generates `pixel_values` from the transforms:

```py
>>> def transforms(examples):
...     examples["pixel_values"] = [_transforms(image.convert("RGB")) for image in examples["image"]]
...     return examples
```

3. Then use 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform) to apply the transforms on the fly:

```py
>>> dataset.set_transform(transforms)
```

4. Now when you access the example, you'll notice the image processor has added the model input `pixel_values`:

```py
>>> dataset[0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F1A7B0630D0>,
 'label': 6,
 'pixel_values': tensor([[[ 0.0353,  0.0745,  0.1216,  ..., -0.9922, -0.9922, -0.9922],
         [-0.0196,  0.0667,  0.1294,  ..., -0.9765, -0.9843, -0.9922],
         [ 0.0196,  0.0824,  0.1137,  ..., -0.9765, -0.9686, -0.8667],
         ...,
         [ 0.0275,  0.0745,  0.0510,  ..., -0.1137, -0.1216, -0.0824],
         [ 0.0667,  0.0824,  0.0667,  ..., -0.0588, -0.0745, -0.0980],
         [ 0.0353,  0.0353,  0.0431,  ..., -0.0039, -0.0039, -0.0588]],

        [[ 0.2078,  0.2471,  0.2863,  ..., -0.9451, -0.9373, -0.9451],
         [ 0.1608,  0.2471,  0.3098,  ..., -0.9373, -0.9451, -0.9373],
         [ 0.2078,  0.2706,  0.3020,  ..., -0.9608, -0.9373, -0.8275],
         ...,
         [-0.0353,  0.0118, -0.0039,  ..., -0.2392, -0.2471, -0.2078],
         [ 0.0196,  0.0353,  0.0196,  ..., -0.1843, -0.2000, -0.2235],
         [-0.0118, -0.0039, -0.0039,  ..., -0.0980, -0.0980, -0.1529]],

        [[ 0.3961,  0.4431,  0.4980,  ..., -0.9216, -0.9137, -0.9216],
         [ 0.3569,  0.4510,  0.5216,  ..., -0.9059, -0.9137, -0.9137],
         [ 0.4118,  0.4745,  0.5216,  ..., -0.9137, -0.8902, -0.7804],
         ...,
         [-0.2314, -0.1922, -0.2078,  ..., -0.4196, -0.4275, -0.3882],
         [-0.1843, -0.1686, -0.2000,  ..., -0.3647, -0.3804, -0.4039],
         [-0.1922, -0.1922, -0.1922,  ..., -0.2941, -0.2863, -0.3412]]])}
```

Here is what the image looks like after preprocessing. Just as you'd expect from the applied transforms, the image has been randomly cropped and its color properties are different.

```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> img = dataset[0]["pixel_values"]
>>> plt.imshow(img.permute(1, 2, 0))
```

![preprocessed_image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png)

## Multimodal

For multimodal tasks, you'll use a combination of everything you've learned so far and apply your skills to an automatic speech recognition (ASR) task.
This means you'll need a:

* Feature extractor to preprocess the audio data.
* Tokenizer to process the text.

Let's return to the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset:

```py
>>> from datasets import load_dataset

>>> lj_speech = load_dataset("lj_speech", split="train")
```

Since you are mainly interested in the `audio` and `text` columns, remove the other columns:

```py
>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])
```

Now take a look at the `audio` and `text` columns:

```py
>>> lj_speech[0]["audio"]
{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ...,
         7.3242188e-04,  2.1362305e-04,  6.1035156e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav',
 'sampling_rate': 22050}

>>> lj_speech[0]["text"]
'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition'
```

Remember from the earlier section on processing audio data: you should always [resample](preprocessing#audio) your audio data's sampling rate to match the sampling rate of the dataset used to pretrain the model:

```py
>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))
```

### Processor

A processor combines a feature extractor and a tokenizer. Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```

1. Create a function that processes the audio data to `input_values` and tokenizes the text to `labels`. These are your inputs to the model:

```py
>>> def prepare_dataset(example):
...     audio = example["audio"]
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))
...     return example
```

2. Apply the `prepare_dataset` function to a sample:

```py
>>> prepare_dataset(lj_speech[0])
```

Notice that the processor has added `input_values` and `labels`. The sampling rate has also been correctly downsampled to 16kHz.

Awesome, you should now be able to preprocess data for any modality and even combine different modalities! In the next tutorial, you'll learn how to fine-tune a model on your freshly preprocessed data.
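As a small extension of step 2 above (not part of the original guide, but using the standard 🤗 Datasets API), you can apply the same function across the whole dataset with `map`, dropping the raw columns once they've been processed:

```py
>>> # Sketch: preprocess every example in one pass; removing the raw columns
>>> # leaves only the model inputs (input_values, labels) in the dataset.
>>> lj_speech = lj_speech.map(prepare_dataset, remove_columns=["audio", "text"])
```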
transformers/docs/source/de/preprocessing.md/0
{ "file_path": "transformers/docs/source/de/preprocessing.md", "repo_id": "transformers", "token_count": 10560 }
264
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# BERTology

There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT (that some call "BERTology"). Some good examples of this field are:

- BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: https://arxiv.org/abs/1905.05950
- Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
- What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341
- CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633

In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (https://arxiv.org/abs/1905.10650):

- accessing all the hidden states of BERT/GPT/GPT-2,
- accessing all the attention weights for each head of BERT/GPT/GPT-2,
- retrieving head output values and gradients to be able to compute head importance scores and prune heads as explained in https://arxiv.org/abs/1905.10650.

To help you understand and use these features, we have added a specific example script, [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py), which extracts information from and prunes a model pre-trained on GLUE.
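To make these hooks concrete, here is a minimal sketch of how they can be used; the checkpoint and the specific heads pruned below are arbitrary illustrative choices:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = AutoModel.from_pretrained("bert-base-uncased")

inputs = tokenizer("BERTology studies the inner workings of BERT.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True, output_attentions=True)

# One hidden-state tensor for the embeddings plus one per layer (13 for BERT-base)
print(len(outputs.hidden_states))
# Attention weights per layer have shape (batch, num_heads, seq_len, seq_len)
print(outputs.attentions[0].shape)

# Prune heads 0 and 2 in layer 0, and head 1 in layer 2 (arbitrary example choices)
model.prune_heads({0: [0, 2], 2: [1]})
```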
transformers/docs/source/en/bertology.md/0
{ "file_path": "transformers/docs/source/en/bertology.md", "repo_id": "transformers", "token_count": 640 }
265
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Backbone

A backbone is a model used for feature extraction for higher level computer vision tasks such as object detection and image classification. Transformers provides an [`AutoBackbone`] class for initializing a Transformers backbone from pretrained model weights, and two utility classes:

* [`~utils.BackboneMixin`] enables initializing a backbone from Transformers or [timm](https://hf.co/docs/timm/index) and includes functions for returning the output features and indices.
* [`~utils.BackboneConfigMixin`] sets the output features and indices of the backbone configuration.

[timm](https://hf.co/docs/timm/index) models are loaded with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes.

Backbones are supported for the following models:

* [BEiT](../model_doc/beit)
* [BiT](../model_doc/bit)
* [ConvNext](../model_doc/convnext)
* [ConvNextV2](../model_doc/convnextv2)
* [DiNAT](../model_doc/dinat)
* [DINOV2](../model_doc/dinov2)
* [FocalNet](../model_doc/focalnet)
* [MaskFormer](../model_doc/maskformer)
* [NAT](../model_doc/nat)
* [ResNet](../model_doc/resnet)
* [Swin Transformer](../model_doc/swin)
* [Swin Transformer v2](../model_doc/swinv2)
* [ViTDet](../model_doc/vitdet)

## AutoBackbone

[[autodoc]] AutoBackbone

## BackboneMixin

[[autodoc]] utils.BackboneMixin

## BackboneConfigMixin

[[autodoc]] utils.BackboneConfigMixin

## TimmBackbone

[[autodoc]] models.timm_backbone.TimmBackbone

## TimmBackboneConfig

[[autodoc]] models.timm_backbone.TimmBackboneConfig
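As a quick illustration of the API described above, here is a minimal sketch of loading a backbone and inspecting its feature maps; the checkpoint and the `out_indices` values are illustrative choices:

```python
import torch
from transformers import AutoBackbone

# Load a Swin backbone and request feature maps from all four stages
backbone = AutoBackbone.from_pretrained(
    "microsoft/swin-tiny-patch4-window7-224", out_indices=(1, 2, 3, 4)
)

pixel_values = torch.randn(1, 3, 224, 224)  # a dummy image batch for demonstration
outputs = backbone(pixel_values)
for feature_map in outputs.feature_maps:
    print(feature_map.shape)  # one tensor per requested stage
```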
transformers/docs/source/en/main_classes/backbones.md/0
{ "file_path": "transformers/docs/source/en/main_classes/backbones.md", "repo_id": "transformers", "token_count": 689 }
266
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Generation

Each framework has a generate method for text generation implemented in its respective `GenerationMixin` class:

- PyTorch [`~generation.GenerationMixin.generate`] is implemented in [`~generation.GenerationMixin`].
- TensorFlow [`~generation.TFGenerationMixin.generate`] is implemented in [`~generation.TFGenerationMixin`].
- Flax/JAX [`~generation.FlaxGenerationMixin.generate`] is implemented in [`~generation.FlaxGenerationMixin`].

Regardless of your framework of choice, you can parameterize the generate method with a [`~generation.GenerationConfig`] class instance. Please refer to this class for the complete list of generation parameters, which control the behavior of the generation method.

To learn how to inspect a model's generation configuration, what the defaults are, how to change the parameters ad hoc, and how to create and save a customized generation configuration, refer to the [text generation strategies guide](../generation_strategies). The guide also explains how to use related features, like token streaming.

## GenerationConfig

[[autodoc]] generation.GenerationConfig
	- from_pretrained
	- from_model_config
	- save_pretrained
	- update
	- validate
	- get_generation_mode

[[autodoc]] generation.WatermarkingConfig

## GenerationMixin

[[autodoc]] generation.GenerationMixin
	- generate
	- compute_transition_scores

## TFGenerationMixin

[[autodoc]] generation.TFGenerationMixin
	- generate
	- compute_transition_scores

## FlaxGenerationMixin

[[autodoc]] generation.FlaxGenerationMixin
	- generate
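As a brief sketch of the pattern described above (the checkpoint and parameter values are arbitrary examples):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# A reusable generation configuration; any parameter accepted by `generate` can be set here
generation_config = GenerationConfig(
    max_new_tokens=20,
    do_sample=True,
    top_k=50,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token; reuse EOS
)

inputs = tokenizer("The Hugging Face library", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```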
transformers/docs/source/en/main_classes/text_generation.md/0
{ "file_path": "transformers/docs/source/en/main_classes/text_generation.md", "repo_id": "transformers", "token_count": 609 }
267
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# BERT

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=bert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-bert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/bert-base-uncased">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The BERT model was proposed in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia.

The abstract from the paper is the following:

*We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result, the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models for a wide range of tasks, such as question answering and language inference, without substantial task-specific architecture modifications.*

*BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).*

This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/bert).

## Usage tips

- BERT is a model with absolute position embeddings, so it's usually advised to pad the inputs on the right rather than the left.
- BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation.
- BERT corrupts the inputs by using random masking; more precisely, during pretraining, a given percentage of tokens (usually 15%) is masked by:

    * a special mask token with probability 0.8
    * a random token different from the one masked with probability 0.1
    * the same token with probability 0.1

- The model must predict the original sentence, but has a second objective: the inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus; in the remaining 50% they are not related. The model has to predict whether the sentences are consecutive or not.

### Using Scaled Dot Product Attention (SDPA)

PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information.

SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.

```python
import torch
from transformers import BertModel

model = BertModel.from_pretrained("bert-base-uncased", torch_dtype=torch.float16, attn_implementation="sdpa")
...
```

For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).

On a local benchmark (A100-80GB, CPUx12, RAM 96.6GB, PyTorch 2.2.0, OS Ubuntu 22.04) with `float16`, we saw the following speedups during training and inference.

#### Training

|batch_size|seq_len|Time per batch (eager - s)|Time per batch (sdpa - s)|Speedup (%)|Eager peak mem (MB)|sdpa peak mem (MB)|Mem saving (%)|
|----------|-------|--------------------------|-------------------------|-----------|-------------------|------------------|--------------|
|4 |256 |0.023 |0.017 |35.472 |939.213 |764.834 |22.800 |
|4 |512 |0.023 |0.018 |23.687 |1970.447 |1227.162 |60.569 |
|8 |256 |0.023 |0.018 |23.491 |1594.295 |1226.114 |30.028 |
|8 |512 |0.035 |0.025 |43.058 |3629.401 |2134.262 |70.054 |
|16 |256 |0.030 |0.024 |25.583 |2874.426 |2134.262 |34.680 |
|16 |512 |0.064 |0.044 |46.223 |6964.659 |3961.013 |75.830 |

#### Inference

|batch_size|seq_len|Per token latency eager (ms)|Per token latency SDPA (ms)|Speedup (%)|Mem eager (MB)|Mem BT (MB)|Mem saved (%)|
|----------|-------|----------------------------|---------------------------|-----------|--------------|-----------|-------------|
|1 |128 |5.736 |4.987 |15.022 |282.661 |282.924 |-0.093 |
|1 |256 |5.689 |4.945 |15.055 |298.686 |298.948 |-0.088 |
|2 |128 |6.154 |4.982 |23.521 |314.523 |314.785 |-0.083 |
|2 |256 |6.201 |4.949 |25.303 |347.546 |347.033 |0.148 |
|4 |128 |6.049 |4.987 |21.305 |378.895 |379.301 |-0.107 |
|4 |256 |6.285 |5.364 |17.166 |443.209 |444.382 |-0.264 |
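For reference, a minimal sketch of how one might time such a comparison locally; the checkpoint, batch shape, and iteration counts below are arbitrary, and the numbers will vary with hardware:

```python
import time

import torch
from transformers import AutoTokenizer, BertModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
inputs = tokenizer(["Hello world"] * 8, return_tensors="pt").to("cuda")

for impl in ["eager", "sdpa"]:
    model = BertModel.from_pretrained(
        "bert-base-uncased", torch_dtype=torch.float16, attn_implementation=impl
    ).to("cuda")
    with torch.no_grad():
        for _ in range(3):  # warmup iterations
            model(**inputs)
        torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in range(20):
            model(**inputs)
        torch.cuda.synchronize()
    print(impl, (time.perf_counter() - start) / 20, "s per forward pass")
```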
## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

<PipelineTag pipeline="text-classification"/>

- A blog post on [BERT Text Classification in a different language](https://www.philschmid.de/bert-text-classification-in-a-different-language).
- A notebook for [Finetuning BERT (and friends) for multi-label text classification](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Fine_tuning_BERT_(and_friends)_for_multi_label_text_classification.ipynb).
- A notebook on how to [Finetune BERT for multi-label classification using PyTorch](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb). 🌎
- A notebook on how to [warm-start an EncoderDecoder model with BERT for summarization](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb).
- [`BertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb).
- [`TFBertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb).
- [`FlaxBertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb).
- [Text classification task guide](../tasks/sequence_classification)

<PipelineTag pipeline="token-classification"/>

- A blog post on how to use [Hugging Face Transformers with Keras: Fine-tune a non-English BERT for Named Entity Recognition](https://www.philschmid.de/huggingface-transformers-keras-tf).
- A notebook for [Finetuning BERT for named-entity recognition](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT_only_first_wordpiece.ipynb) using only the first wordpiece of each word in the word label during tokenization. To propagate the label of the word to all wordpieces, see this [version](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT.ipynb) of the notebook instead.
- [`BertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb).
- [`TFBertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
- [`FlaxBertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification).
- [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course.
- [Token classification task guide](../tasks/token_classification)

<PipelineTag pipeline="fill-mask"/>

- [`BertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
- [`TFBertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
- [`FlaxBertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb).
- [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course.
- [Masked language modeling task guide](../tasks/masked_language_modeling)

<PipelineTag pipeline="question-answering"/>

- [`BertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb).
- [`TFBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb).
- [`FlaxBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering).
- [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course.
- [Question answering task guide](../tasks/question_answering)

**Multiple choice**

- [`BertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb).
- [`TFBertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb).
- [Multiple choice task guide](../tasks/multiple_choice)

⚡️ **Inference**

- A blog post on how to [Accelerate BERT inference with Hugging Face Transformers and AWS Inferentia](https://huggingface.co/blog/bert-inferentia-sagemaker).
- A blog post on how to [Accelerate BERT inference with DeepSpeed-Inference on GPUs](https://www.philschmid.de/bert-deepspeed-inference).

⚙️ **Pretraining**

- A blog post on [Pre-Training BERT with Hugging Face Transformers and Habana Gaudi](https://www.philschmid.de/pre-training-bert-habana).

🚀 **Deploy**

- A blog post on how to [Convert Transformers to ONNX with Hugging Face Optimum](https://www.philschmid.de/convert-transformers-to-onnx).
- A blog post on how to [Setup Deep Learning environment for Hugging Face Transformers with Habana Gaudi on AWS](https://www.philschmid.de/getting-started-habana-gaudi#conclusion).
- A blog post on [Autoscaling BERT with Hugging Face Transformers, Amazon SageMaker and Terraform module](https://www.philschmid.de/terraform-huggingface-amazon-sagemaker-advanced).
- A blog post on [Serverless BERT with HuggingFace, AWS Lambda, and Docker](https://www.philschmid.de/serverless-bert-with-huggingface-aws-lambda-docker).
- A blog post on [Hugging Face Transformers BERT fine-tuning using Amazon SageMaker and Training Compiler](https://www.philschmid.de/huggingface-amazon-sagemaker-training-compiler).
- A blog post on [Task-specific knowledge distillation for BERT using Transformers & Amazon SageMaker](https://www.philschmid.de/knowledge-distillation-bert-transformers).

## BertConfig

[[autodoc]] BertConfig
    - all

## BertTokenizer

[[autodoc]] BertTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

<frameworkcontent>
<pt>

## BertTokenizerFast

[[autodoc]] BertTokenizerFast

</pt>
<tf>

## TFBertTokenizer

[[autodoc]] TFBertTokenizer

</tf>
</frameworkcontent>

## Bert specific outputs

[[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput

[[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput

[[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput

<frameworkcontent>
<pt>

## BertModel

[[autodoc]] BertModel
    - forward

## BertForPreTraining

[[autodoc]] BertForPreTraining
    - forward

## BertLMHeadModel

[[autodoc]] BertLMHeadModel
    - forward

## BertForMaskedLM

[[autodoc]] BertForMaskedLM
    - forward

## BertForNextSentencePrediction

[[autodoc]] BertForNextSentencePrediction
    - forward

## BertForSequenceClassification

[[autodoc]] BertForSequenceClassification
    - forward

## BertForMultipleChoice

[[autodoc]] BertForMultipleChoice
    - forward

## BertForTokenClassification

[[autodoc]] BertForTokenClassification
    - forward

## BertForQuestionAnswering

[[autodoc]] BertForQuestionAnswering
    - forward

</pt>
<tf>

## TFBertModel

[[autodoc]] TFBertModel
    - call

## TFBertForPreTraining

[[autodoc]] TFBertForPreTraining
    - call

## TFBertLMHeadModel

[[autodoc]] TFBertLMHeadModel
    - call

## TFBertForMaskedLM

[[autodoc]] TFBertForMaskedLM
    - call

## TFBertForNextSentencePrediction

[[autodoc]] TFBertForNextSentencePrediction
    - call

## TFBertForSequenceClassification

[[autodoc]] TFBertForSequenceClassification
    - call

## TFBertForMultipleChoice

[[autodoc]] TFBertForMultipleChoice
    - call

## TFBertForTokenClassification

[[autodoc]] TFBertForTokenClassification
    - call

## TFBertForQuestionAnswering

[[autodoc]] TFBertForQuestionAnswering
    - call

</tf>
<jax>

## FlaxBertModel

[[autodoc]] FlaxBertModel
    - __call__

## FlaxBertForPreTraining

[[autodoc]] FlaxBertForPreTraining
    - __call__

## FlaxBertForCausalLM

[[autodoc]] FlaxBertForCausalLM
    - __call__

## FlaxBertForMaskedLM

[[autodoc]] FlaxBertForMaskedLM
    - __call__

## FlaxBertForNextSentencePrediction

[[autodoc]] FlaxBertForNextSentencePrediction
    - __call__

## FlaxBertForSequenceClassification

[[autodoc]] FlaxBertForSequenceClassification
    - __call__

## FlaxBertForMultipleChoice

[[autodoc]] FlaxBertForMultipleChoice
    - __call__

## FlaxBertForTokenClassification

[[autodoc]] FlaxBertForTokenClassification
    - __call__

## FlaxBertForQuestionAnswering

[[autodoc]] FlaxBertForQuestionAnswering
    - __call__

</jax>
</frameworkcontent>
transformers/docs/source/en/model_doc/bert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bert.md", "repo_id": "transformers", "token_count": 6475 }
268
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# CANINE

## Overview

The CANINE model was proposed in [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting. It's among the first papers that train a Transformer without using an explicit tokenization step (such as Byte Pair Encoding (BPE), WordPiece or SentencePiece). Instead, the model is trained directly at a Unicode character level. Training at a character level inevitably comes with a longer sequence length, which CANINE solves with an efficient downsampling strategy, before applying a deep Transformer encoder.

The abstract from the paper is the following:

*Pipelined NLP systems have largely been superseded by end-to-end neural modeling, yet nearly all commonly-used models still require an explicit tokenization step. While recent tokenization approaches based on data-derived subword lexicons are less brittle than manually engineered tokenizers, these techniques are not equally suited to all languages, and the use of any fixed vocabulary may limit a model's ability to adapt. In this paper, we present CANINE, a neural encoder that operates directly on character sequences, without explicit tokenization or vocabulary, and a pre-training strategy that operates either directly on characters or optionally uses subwords as a soft inductive bias. To use its finer-grained input effectively and efficiently, CANINE combines downsampling, which reduces the input sequence length, with a deep transformer stack, which encodes context. CANINE outperforms a comparable mBERT model by 2.8 F1 on TyDi QA, a challenging multilingual benchmark, despite having 28% fewer model parameters.*

This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/google-research/language/tree/master/language/canine).

## Usage tips

- CANINE uses no less than 3 Transformer encoders internally: 2 "shallow" encoders (which only consist of a single layer) and 1 "deep" encoder (which is a regular BERT encoder). First, a "shallow" encoder is used to contextualize the character embeddings, using local attention. Next, after downsampling, a "deep" encoder is applied. Finally, after upsampling, a "shallow" encoder is used to create the final character embeddings. Details regarding up- and downsampling can be found in the paper.
- CANINE uses a max sequence length of 2048 characters by default. One can use [`CanineTokenizer`] to prepare text for the model.
- Classification can be done by placing a linear layer on top of the final hidden state of the special [CLS] token (which has a predefined Unicode code point).
For token classification tasks however, the downsampled sequence of tokens needs to be upsampled again to match the length of the original character sequence (which is 2048). The details for this can be found in the paper. Model checkpoints: - [google/canine-c](https://huggingface.co/google/canine-c): Pre-trained with autoregressive character loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB). - [google/canine-s](https://huggingface.co/google/canine-s): Pre-trained with subword loss, 12-layer, 768-hidden, 12-heads, 121M parameters (size ~500 MB). ## Usage example CANINE works on raw characters, so it can be used **without a tokenizer**: ```python >>> from transformers import CanineModel >>> import torch >>> model = CanineModel.from_pretrained("google/canine-c") # model pre-trained with autoregressive character loss >>> text = "hello world" >>> # use Python's built-in ord() function to turn each character into its unicode code point id >>> input_ids = torch.tensor([[ord(char) for char in text]]) >>> outputs = model(input_ids) # forward pass >>> pooled_output = outputs.pooler_output >>> sequence_output = outputs.last_hidden_state ``` For batched inference and training, it is however recommended to make use of the tokenizer (to pad/truncate all sequences to the same length): ```python >>> from transformers import CanineTokenizer, CanineModel >>> model = CanineModel.from_pretrained("google/canine-c") >>> tokenizer = CanineTokenizer.from_pretrained("google/canine-c") >>> inputs = ["Life is like a box of chocolates.", "You never know what you gonna get."] >>> encoding = tokenizer(inputs, padding="longest", truncation=True, return_tensors="pt") >>> outputs = model(**encoding) # forward pass >>> pooled_output = outputs.pooler_output >>> sequence_output = outputs.last_hidden_state ``` ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Multiple choice task guide](../tasks/multiple_choice) ## CanineConfig [[autodoc]] CanineConfig ## CanineTokenizer [[autodoc]] CanineTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences ## CANINE specific outputs [[autodoc]] models.canine.modeling_canine.CanineModelOutputWithPooling ## CanineModel [[autodoc]] CanineModel - forward ## CanineForSequenceClassification [[autodoc]] CanineForSequenceClassification - forward ## CanineForMultipleChoice [[autodoc]] CanineForMultipleChoice - forward ## CanineForTokenClassification [[autodoc]] CanineForTokenClassification - forward ## CanineForQuestionAnswering [[autodoc]] CanineForQuestionAnswering - forward
transformers/docs/source/en/model_doc/canine.md/0
{ "file_path": "transformers/docs/source/en/model_doc/canine.md", "repo_id": "transformers", "token_count": 1723 }
269
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# CTRL

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=ctrl">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-ctrl-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/tiny-ctrl">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The CTRL model was proposed in [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher. It's a causal (unidirectional) transformer pre-trained using language modeling on a very large corpus of ~140 GB of text data with the first token reserved as a control code (such as Links, Books, Wikipedia etc.).

The abstract from the paper is the following:

*Large-scale language models show promising text generation capabilities, but users cannot easily control particular aspects of the generated text. We release CTRL, a 1.63 billion-parameter conditional transformer language model, trained to condition on control codes that govern style, content, and task-specific behavior. Control codes were derived from structure that naturally co-occurs with raw text, preserving the advantages of unsupervised learning while providing more explicit control over text generation. These codes also allow CTRL to predict which parts of the training data are most likely given a sequence. This provides a potential method for analyzing large amounts of data via model-based source attribution.*

This model was contributed by [keskarnitishr](https://huggingface.co/keskarnitishr). The original code can be found [here](https://github.com/salesforce/ctrl).

## Usage tips

- CTRL makes use of control codes to generate text: it requires generations to be started by certain words, sentences or links to generate coherent text. Refer to the [original implementation](https://github.com/salesforce/ctrl) for more information.
- CTRL is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left.
- CTRL was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows CTRL to generate syntactically coherent text as it can be observed in the *run_generation.py* example script.
- The PyTorch models can take the `past_key_values` as input, which is the previously computed key/value attention pairs. TensorFlow models accept `past` as input. Using the `past_key_values` value prevents the model from re-computing pre-computed values in the context of text generation, as in the sketch below.
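
As a rough sketch of this caching pattern (the `Links` control code and greedy next-token selection here are just illustrative choices, not the library's recommended generation API):

```python
import torch
from transformers import CTRLTokenizer, CTRLLMHeadModel

tokenizer = CTRLTokenizer.from_pretrained("Salesforce/ctrl")
model = CTRLLMHeadModel.from_pretrained("Salesforce/ctrl")

# the prompt starts with a control code ("Links")
inputs = tokenizer("Links Hello, my dog is cute", return_tensors="pt")
outputs = model(**inputs, use_cache=True)
past_key_values = outputs.past_key_values  # cached key/value attention pairs

# on the next step, only the newly chosen token needs to be fed together with
# the cache, instead of re-encoding the whole prefix
next_token = outputs.logits[:, -1:].argmax(-1)
outputs = model(input_ids=next_token, past_key_values=past_key_values)
```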
See the [`forward`](model_doc/ctrl#transformers.CTRLModel.forward) method for more information on the usage of this argument. ## Resources - [Text classification task guide](../tasks/sequence_classification) - [Causal language modeling task guide](../tasks/language_modeling) ## CTRLConfig [[autodoc]] CTRLConfig ## CTRLTokenizer [[autodoc]] CTRLTokenizer - save_vocabulary <frameworkcontent> <pt> ## CTRLModel [[autodoc]] CTRLModel - forward ## CTRLLMHeadModel [[autodoc]] CTRLLMHeadModel - forward ## CTRLForSequenceClassification [[autodoc]] CTRLForSequenceClassification - forward </pt> <tf> ## TFCTRLModel [[autodoc]] TFCTRLModel - call ## TFCTRLLMHeadModel [[autodoc]] TFCTRLLMHeadModel - call ## TFCTRLForSequenceClassification [[autodoc]] TFCTRLForSequenceClassification - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/ctrl.md/0
{ "file_path": "transformers/docs/source/en/model_doc/ctrl.md", "repo_id": "transformers", "token_count": 1209 }
270
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Dilated Neighborhood Attention Transformer

## Overview

DiNAT was proposed in [Dilated Neighborhood Attention Transformer](https://arxiv.org/abs/2209.15001) by Ali Hassani and Humphrey Shi. It extends [NAT](nat) by adding a Dilated Neighborhood Attention pattern to capture global context, and shows significant performance improvements over it.

The abstract from the paper is the following:

*Transformers are quickly becoming one of the most heavily applied deep learning architectures across modalities, domains, and tasks. In vision, on top of ongoing efforts into plain transformers, hierarchical transformers have also gained significant attention, thanks to their performance and easy integration into existing frameworks. These models typically employ localized attention mechanisms, such as the sliding-window Neighborhood Attention (NA) or Swin Transformer's Shifted Window Self Attention. While effective at reducing self attention's quadratic complexity, local attention weakens two of the most desirable properties of self attention: long range inter-dependency modeling, and global receptive field. In this paper, we introduce Dilated Neighborhood Attention (DiNA), a natural, flexible and efficient extension to NA that can capture more global context and expand receptive fields exponentially at no additional cost. NA's local attention and DiNA's sparse global attention complement each other, and therefore we introduce Dilated Neighborhood Attention Transformer (DiNAT), a new hierarchical vision transformer built upon both. DiNAT variants enjoy significant improvements over strong baselines such as NAT, Swin, and ConvNeXt. Our large model is faster and ahead of its Swin counterpart by 1.5% box AP in COCO object detection, 1.3% mask AP in COCO instance segmentation, and 1.1% mIoU in ADE20K semantic segmentation. Paired with new frameworks, our large variant is the new state of the art panoptic segmentation model on COCO (58.2 PQ) and ADE20K (48.5 PQ), and instance segmentation model on Cityscapes (44.5 AP) and ADE20K (35.4 AP) (no extra data). It also matches the state of the art specialized semantic segmentation models on ADE20K (58.2 mIoU), and ranks second on Cityscapes (84.5 mIoU) (no extra data).*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/dilated-neighborhood-attention-pattern.jpg" alt="drawing" width="600"/>

<small> Neighborhood Attention with different dilation values. Taken from the <a href="https://arxiv.org/abs/2209.15001">original paper</a>.</small>

This model was contributed by [Ali Hassani](https://huggingface.co/alihassanijr). The original code can be found [here](https://github.com/SHI-Labs/Neighborhood-Attention-Transformer).

## Usage tips

DiNAT can be used as a *backbone*, as the sketch below illustrates.
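
A minimal feature-extraction sketch (this assumes the NATTEN dependency mentioned below is installed; `shi-labs/dinat-mini-in1k-224` is one of the checkpoints available on the Hub):

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, DinatModel

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224")
model = DinatModel.from_pretrained("shi-labs/dinat-mini-in1k-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs, output_hidden_states=True)

# one channels-first feature map per stage, usable by downstream heads
for feature_map in outputs.reshaped_hidden_states:
    print(feature_map.shape)  # (batch_size, num_channels, height, width)
```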
When `output_hidden_states = True`, the model outputs both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, height, width, num_channels)`.

Notes:
- DiNAT depends on [NATTEN](https://github.com/SHI-Labs/NATTEN/)'s implementation of Neighborhood Attention and Dilated Neighborhood Attention. You can install it with pre-built wheels for Linux by referring to [shi-labs.com/natten](https://shi-labs.com/natten), or build on your system by running `pip install natten`. Note that the latter will likely take time to compile. NATTEN does not support Windows devices yet.
- Only a patch size of 4 is supported at the moment.

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DiNAT.

<PipelineTag pipeline="image-classification"/>

- [`DinatForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
- See also: [Image classification task guide](../tasks/image_classification)

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

## DinatConfig

[[autodoc]] DinatConfig

## DinatModel

[[autodoc]] DinatModel
- forward

## DinatForImageClassification

[[autodoc]] DinatForImageClassification
- forward
transformers/docs/source/en/model_doc/dinat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/dinat.md", "repo_id": "transformers", "token_count": 1371 }
271
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# FalconMamba

## Overview

The FalconMamba model was proposed by TII UAE (Technology Innovation Institute) in their release.

The abstract from the paper is the following:

*We present FalconMamba, a new base large language model based on the novel Mamba architecture. FalconMamba is trained on 5.8 trillion tokens with carefully selected data mixtures. As a pure Mamba-based model, FalconMamba surpasses leading open-weight models based on Transformers, such as Mistral 7B, Llama3 8B, and Falcon2 11B. It is on par with Gemma 7B and outperforms models with different architecture designs, such as RecurrentGemma 9B. Currently, FalconMamba is the best-performing Mamba model in the literature at this scale, surpassing both existing Mamba and hybrid Mamba-Transformer models. Due to its architecture, FalconMamba is significantly faster at inference and requires substantially less memory for long sequence generation. Despite recent studies suggesting that hybrid Mamba-Transformer models outperform pure architecture designs, we argue and demonstrate that the pure Mamba design can achieve similar, even superior results compared to the hybrid design. We make the weights of our implementation of FalconMamba publicly available under a permissive license.*

Tips:

- FalconMamba is mostly based on the Mamba architecture, so the same [tips and best practices](./mamba) are relevant here.

The model has been trained on approximately 6T tokens consisting of a mixture of many data sources such as RefineWeb, Cosmopedia and Math data.

For more details about the training procedure and the architecture, have a look at [the technical paper of FalconMamba]() (coming soon).
## Usage

Below we demonstrate how to use the model:

```python
from transformers import FalconMambaForCausalLM, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b")

input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"]

out = model.generate(input_ids, max_new_tokens=10)
print(tokenizer.batch_decode(out))
```

The architecture is also compatible with `torch.compile` for faster generation:

```python
from transformers import FalconMambaForCausalLM, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b", torch_dtype=torch.bfloat16).to(0)
model = torch.compile(model)

input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"]

out = model.generate(input_ids, max_new_tokens=10)
print(tokenizer.batch_decode(out))
```

If you have access to a GPU that is compatible with `bitsandbytes`, you can also quantize the model in 4-bit precision:

```python
from transformers import FalconMambaForCausalLM, AutoTokenizer, BitsAndBytesConfig
import torch

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b", quantization_config=quantization_config)

input_ids = tokenizer("Hey how are you doing?", return_tensors="pt")["input_ids"]

out = model.generate(input_ids, max_new_tokens=10)
print(tokenizer.batch_decode(out))
```

You can also play with the instruction fine-tuned model:

```python
from transformers import FalconMambaForCausalLM, AutoTokenizer
import torch

tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct")

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
# `apply_chat_template` with `tokenize=False` returns a formatted string,
# which we then tokenize to obtain the input ids
input_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
input_ids = tokenizer(input_text, return_tensors="pt").input_ids

outputs = model.generate(input_ids)
print(tokenizer.decode(outputs[0]))
```

## FalconMambaConfig

[[autodoc]] FalconMambaConfig

## FalconMambaModel

[[autodoc]] FalconMambaModel
- forward

## FalconMambaForCausalLM

[[autodoc]] FalconMambaForCausalLM
- forward
transformers/docs/source/en/model_doc/falcon_mamba.md/0
{ "file_path": "transformers/docs/source/en/model_doc/falcon_mamba.md", "repo_id": "transformers", "token_count": 1454 }
272
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# OpenAI GPT2

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=gpt2">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-gpt2-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/gpt2">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The OpenAI GPT-2 model was proposed in [Language Models are Unsupervised Multitask Learners](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) by Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei and Ilya Sutskever from [OpenAI](https://huggingface.co/openai). It's a causal (unidirectional) transformer pretrained using language modeling on a very large corpus of ~40 GB of text data.

The abstract from the paper is the following:

*GPT-2 is a large transformer-based language model with 1.5 billion parameters, trained on a dataset[1] of 8 million web pages. GPT-2 is trained with a simple objective: predict the next word, given all of the previous words within some text. The diversity of the dataset causes this simple goal to contain naturally occurring demonstrations of many tasks across diverse domains. GPT-2 is a direct scale-up of GPT, with more than 10X the parameters and trained on more than 10X the amount of data.*

[Write With Transformer](https://transformer.huggingface.co/doc/gpt2-large) is a webapp created and hosted by Hugging Face showcasing the generative capabilities of several models. GPT-2 is one of them and is available in five different sizes: small, medium, large, xl and a distilled version of the small checkpoint: *distilgpt-2*.

This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://openai.com/blog/better-language-models/).

## Usage tips

- GPT-2 is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left.
- GPT-2 was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows GPT-2 to generate syntactically coherent text as it can be observed in the *run_generation.py* example script.
- The model can take the *past_key_values* (for PyTorch) or *past* (for TF) as input, which is the previously computed key/value attention pairs. Using this (*past_key_values* or *past*) value prevents the model from re-computing pre-computed values in the context of text generation. For PyTorch, see the *past_key_values* argument of the [`GPT2Model.forward`] method, or for TF the *past* argument of the [`TFGPT2Model.call`] method for more information on its usage.
- Enabling the *scale_attn_by_inverse_layer_idx* and *reorder_and_upcast_attn* flags will apply the training stability improvements from [Mistral](https://github.com/stanford-crfm/mistral/) (for PyTorch only).

## Usage example

The `generate()` method can be used to generate text using the GPT2 model.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> model = AutoModelForCausalLM.from_pretrained("gpt2")
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")

>>> prompt = "GPT2 is a model developed by OpenAI."

>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids

>>> gen_tokens = model.generate(
...     input_ids,
...     do_sample=True,
...     temperature=0.9,
...     max_length=100,
... )
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]
```

## Using Flash Attention 2

Flash Attention 2 is a faster, optimized version of the attention scores computation which relies on `cuda` kernels.

### Installation

First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimizations through the Better Transformer support covered in the [Bark documentation](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).

Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:

```bash
pip install -U flash-attn --no-build-isolation
```

### Usage

To load a model using Flash Attention 2, we can pass the argument `attn_implementation="flash_attention_2"` to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. `torch.float16`), since it results in almost no degradation in generation quality but significantly lower memory usage and faster inference:

```python
>>> import torch
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> device = "cuda"  # the device to load the model onto

>>> model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
>>> tokenizer = AutoTokenizer.from_pretrained("gpt2")

>>> prompt = "def hello_world():"

>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
>>> model.to(device)

>>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
>>> tokenizer.batch_decode(generated_ids)[0]
```

### Expected speedups

Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using the `gpt2` checkpoint and the Flash Attention 2 version of the model using a sequence length of 512.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/EduardoPacheco/documentation-images/resolve/main/gpt2_flash_attention_2_speedup.jpg">
</div>

## Using Scaled Dot Product Attention (SDPA)

PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use.
See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information.

SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=torch.float16, attn_implementation="sdpa")
...
```

For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).

On a local benchmark (rtx3080ti-16GB, PyTorch 2.2.1, OS Ubuntu 22.04) using `float16` with [gpt2-large](https://huggingface.co/openai-community/gpt2-large), we saw the following speedups during training and inference.

### Training

| Batch size | Seq len | Time per batch (Eager - s) | Time per batch (SDPA - s) | Speedup (%) | Eager peak mem (MB) | SDPA peak mem (MB) | Mem saving (%) |
|-----------:|--------:|---------------------------:|--------------------------:|------------:|--------------------:|-------------------:|------------------:|
| 1 | 128 | 0.039 | 0.032 | 23.042 | 3482.32 | 3494.62 | -0.352 |
| 1 | 256 | 0.073 | 0.059 | 25.15 | 3546.66 | 3552.6 | -0.167 |
| 1 | 512 | 0.155 | 0.118 | 30.96 | 4230.1 | 3665.59 | 15.4 |
| 1 | 1024 | 0.316 | 0.209 | 50.839 | 8682.26 | 4881.09 | 77.875 |
| 2 | 128 | 0.07 | 0.06 | 15.324 | 3557.8 | 3545.91 | 0.335 |
| 2 | 256 | 0.143 | 0.122 | 16.53 | 3901.5 | 3657.68 | 6.666 |
| 2 | 512 | 0.267 | 0.213 | 25.626 | 7062.21 | 4876.47 | 44.822 |
| 2 | 1024 | OOM | 0.404 | / | OOM | 8096.35 | SDPA does not OOM |
| 4 | 128 | 0.134 | 0.128 | 4.412 | 3675.79 | 3648.72 | 0.742 |
| 4 | 256 | 0.243 | 0.217 | 12.292 | 6129.76 | 4871.12 | 25.839 |
| 4 | 512 | 0.494 | 0.406 | 21.687 | 12466.6 | 8102.64 | 53.858 |
| 4 | 1024 | OOM | 0.795 | / | OOM | 14568.2 | SDPA does not OOM |

### Inference

| Batch size | Seq len | Per token latency Eager (ms) | Per token latency SDPA (ms) | Speedup (%) | Mem Eager (MB) | Mem SDPA (MB) | Mem saved (%) |
|-----------:|--------:|-----------------------------:|----------------------------:|------------:|---------------:|--------------:|--------------:|
| 1 | 128 | 7.991 | 6.968 | 14.681 | 1685.2 | 1701.32 | -0.947 |
| 1 | 256 | 8.462 | 7.199 | 17.536 | 1745.49 | 1770.78 | -1.428 |
| 1 | 512 | 8.68 | 7.853 | 10.529 | 1907.69 | 1921.29 | -0.708 |
| 1 | 768 | 9.101 | 8.365 | 8.791 | 2032.93 | 2068.12 | -1.701 |
| 2 | 128 | 9.169 | 9.001 | 1.861 | 1803.84 | 1811.4 | -0.418 |
| 2 | 256 | 9.907 | 9.78 | 1.294 | 1907.72 | 1921.44 | -0.714 |
| 2 | 512 | 11.519 | 11.644 | -1.071 | 2176.86 | 2197.75 | -0.951 |
| 2 | 768 | 13.022 | 13.407 | -2.873 | 2464.3 | 2491.06 | -1.074 |
| 4 | 128 | 10.097 | 9.831 | 2.709 | 1942.25 | 1985.13 | -2.16 |
| 4 | 256 | 11.599 | 11.398 | 1.764 | 2177.28 | 2197.86 | -0.937 |
| 4 | 512 | 14.653 | 14.45 | 1.411 | 2753.16 | 2772.57 | -0.7 |
| 4 | 768 | 17.846 | 17.617 | 1.299 | 3327.04 | 3343.97 | -0.506 |

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with GPT2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
<PipelineTag pipeline="text-generation"/> - A blog on how to [Finetune a non-English GPT-2 Model with Hugging Face](https://www.philschmid.de/fine-tune-a-non-english-gpt-2-model-with-huggingface). - A blog on [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate) with GPT-2. - A blog on [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot), a large GPT-2 model. - A blog on [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate) with GPT-2. - A blog on [How to train a Language Model with Megatron-LM](https://huggingface.co/blog/megatron-training) with a GPT-2 model. - A notebook on how to [finetune GPT2 to generate lyrics in the style of your favorite artist](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb). 🌎 - A notebook on how to [finetune GPT2 to generate tweets in the style of your favorite Twitter user](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb). 🌎 - [Causal language modeling](https://huggingface.co/course/en/chapter7/6?fw=pt#training-a-causal-language-model-from-scratch) chapter of the 🤗 Hugging Face Course. - [`GPT2LMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#gpt-2gpt-and-causal-language-modeling), [text generation example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation), and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFGPT2LMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_clmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). - [`FlaxGPT2LMHeadModel`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#causal-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/causal_language_modeling_flax.ipynb). 
- [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Causal language modeling task guide](../tasks/language_modeling) ## GPT2Config [[autodoc]] GPT2Config ## GPT2Tokenizer [[autodoc]] GPT2Tokenizer - save_vocabulary ## GPT2TokenizerFast [[autodoc]] GPT2TokenizerFast ## GPT2 specific outputs [[autodoc]] models.gpt2.modeling_gpt2.GPT2DoubleHeadsModelOutput [[autodoc]] models.gpt2.modeling_tf_gpt2.TFGPT2DoubleHeadsModelOutput <frameworkcontent> <pt> ## GPT2Model [[autodoc]] GPT2Model - forward ## GPT2LMHeadModel [[autodoc]] GPT2LMHeadModel - forward ## GPT2DoubleHeadsModel [[autodoc]] GPT2DoubleHeadsModel - forward ## GPT2ForQuestionAnswering [[autodoc]] GPT2ForQuestionAnswering - forward ## GPT2ForSequenceClassification [[autodoc]] GPT2ForSequenceClassification - forward ## GPT2ForTokenClassification [[autodoc]] GPT2ForTokenClassification - forward </pt> <tf> ## TFGPT2Model [[autodoc]] TFGPT2Model - call ## TFGPT2LMHeadModel [[autodoc]] TFGPT2LMHeadModel - call ## TFGPT2DoubleHeadsModel [[autodoc]] TFGPT2DoubleHeadsModel - call ## TFGPT2ForSequenceClassification [[autodoc]] TFGPT2ForSequenceClassification - call ## TFSequenceClassifierOutputWithPast [[autodoc]] modeling_tf_outputs.TFSequenceClassifierOutputWithPast ## TFGPT2Tokenizer [[autodoc]] TFGPT2Tokenizer </tf> <jax> ## FlaxGPT2Model [[autodoc]] FlaxGPT2Model - __call__ ## FlaxGPT2LMHeadModel [[autodoc]] FlaxGPT2LMHeadModel - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/gpt2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gpt2.md", "repo_id": "transformers", "token_count": 7010 }
273
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Idefics2

## Overview

The Idefics2 model was proposed in [What matters when building vision-language models?](https://arxiv.org/abs/2405.02246) by Léo Tronchon, Hugo Laurencon, Victor Sanh. The accompanying blog post can be found [here](https://huggingface.co/blog/idefics2).

Idefics2 is an open multimodal model that accepts arbitrary sequences of image and text inputs and produces text outputs. The model can answer questions about images, describe visual content, create stories grounded on multiple images, or simply behave as a pure language model without visual inputs. It improves upon IDEFICS-1, notably on document understanding, OCR, and visual reasoning. Idefics2 is lightweight (8 billion parameters) and treats images in their native aspect ratio and resolution, which allows for varying inference efficiency.

The abstract from the paper is the following:

*The growing interest in vision-language models (VLMs) has been driven by improvements in large language models and vision transformers. Despite the abundance of literature on this subject, we observe that critical decisions regarding the design of VLMs are often not justified. We argue that these unsupported decisions impede progress in the field by making it difficult to identify which choices improve model performance. To address this issue, we conduct extensive experiments around pre-trained models, architecture choice, data, and training methods. Our consolidation of findings includes the development of Idefics2, an efficient foundational VLM of 8 billion parameters. Idefics2 achieves state-of-the-art performance within its size category across various multimodal benchmarks, and is often on par with models four times its size. We release the model (base, instructed, and chat) along with the datasets created for its training.*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/idefics2_architecture.png" alt="drawing" width="600"/>

<small> Idefics2 architecture. Taken from the <a href="https://arxiv.org/abs/2405.02246">original paper.</a> </small>

This model was contributed by [amyeroberts](https://huggingface.co/amyeroberts). The original code can be found [here](https://huggingface.co/HuggingFaceM4/idefics2).

## Usage tips

- Each sample can contain multiple images, and the number of images can vary between samples. The processor will pad the inputs to the maximum number of images in a batch for input to the model.
- The processor has a `do_image_splitting` option. If `True`, each input image will be split into 4 sub-images, and concatenated with the original to form 5 images. This is useful for increasing model performance.
  Make sure `processor.image_processor.do_image_splitting` is set to `False` if the model was not trained with this option.
- The `text` passed to the processor should have the `<image>` tokens where the images should be inserted, and `<end_of_utterance>` at the end of each utterance if the text is a chat message.
- The processor has its own `apply_chat_template` method to convert chat messages to text that can then be passed as `text` to the processor.

Example of how to use the processor on chat messages:

```python
import requests

from PIL import Image
from transformers import Idefics2Processor, Idefics2ForConditionalGeneration
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"

url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
url_2 = "http://images.cocodataset.org/val2017/000000219578.jpg"

image_1 = Image.open(requests.get(url_1, stream=True).raw)
image_2 = Image.open(requests.get(url_2, stream=True).raw)
images = [image_1, image_2]

messages = [{
    "role": "user",
    "content": [
        {"type": "text", "text": "What’s the difference between these two images?"},
        {"type": "image"},
        {"type": "image"},
    ],
}]

processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b")
model = Idefics2ForConditionalGeneration.from_pretrained("HuggingFaceM4/idefics2-8b")
model.to(device)

# at inference time, one needs to pass `add_generation_prompt=True` in order to make sure the model completes the prompt
text = processor.apply_chat_template(messages, add_generation_prompt=True)
print(text)
# 'User: What’s the difference between these two images?<image><image><end_of_utterance>\nAssistant:'

inputs = processor(images=images, text=text, return_tensors="pt").to(device)

generated_ids = model.generate(**inputs, max_new_tokens=500)
generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
print("Generated text:", generated_text)
```

- During training, it's important to determine which tokens the model should not learn. For Idefics2, this typically comes down to the image and padding tokens.
This means that one can create the labels as follows:

```python
import requests

from PIL import Image
from transformers import Idefics2Processor, Idefics2ForConditionalGeneration
import torch

url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
url_2 = "http://images.cocodataset.org/val2017/000000219578.jpg"

image_1 = Image.open(requests.get(url_1, stream=True).raw)
image_2 = Image.open(requests.get(url_2, stream=True).raw)
images = [image_1, image_2]

messages = [{
    "role": "user",
    "content": [
        {"type": "text", "text": "What’s the difference between these two images?"},
        {"type": "image"},
        {"type": "image"},
    ],
},
{
    "role": "assistant",
    "content": [
        {"type": "text", "text": "The difference is that one image is about dogs and the other one about cats."},
    ],
}]

device = "cuda" if torch.cuda.is_available() else "cpu"

processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b")
model = Idefics2ForConditionalGeneration.from_pretrained("HuggingFaceM4/idefics2-8b")
model.to(device)

text = processor.apply_chat_template(messages, add_generation_prompt=False)
inputs = processor(images=images, text=text, return_tensors="pt").to(device)

labels = inputs.input_ids.clone()
labels[labels == processor.tokenizer.pad_token_id] = -100
labels[labels == model.config.image_token_id] = -100

inputs["labels"] = labels

outputs = model(**inputs)
loss = outputs.loss
loss.backward()
```

Do note that when training Idefics2 on multi-turn conversations between a user and an assistant, one typically also sets all the tokens corresponding to the user messages to -100.

## Model optimizations: Flash Attention

The code snippets above showcase inference without any optimization tricks. However, one can drastically speed up the model by leveraging [Flash Attention](../perf_train_gpu_one.md#flash-attention-2), which is a faster implementation of the attention mechanism used inside the model.

First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature.

```bash
pip install -U flash-attn --no-build-isolation
```

Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the [flash attention repository](https://github.com/Dao-AILab/flash-attention). Also make sure to load your model in half-precision (e.g. `torch.float16`).

To load and run a model using Flash Attention 2, simply change the code snippet above with the following change:

```diff
model = Idefics2ForConditionalGeneration.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
+    torch_dtype=torch.float16,
+    attn_implementation="flash_attention_2",
).to(device)
```

## Shrinking down Idefics2 using quantization

As the Idefics2 model has 8 billion parameters, that would require about 16GB of GPU RAM in half precision (float16), since each parameter is stored in 2 bytes. However, one can shrink down the size of the model using [quantization](../quantization.md). If the model is quantized to 4 bits (or half a byte per parameter), that requires only about 3.5GB of RAM.

Quantizing a model is as simple as passing a `quantization_config` to the model. One can change the code snippet above with the changes below.
We'll leverage bitsandbytes quantization (but refer to [this page](../quantization.md) for other quantization methods):

```diff
+ from transformers import BitsAndBytesConfig

+ quantization_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_use_double_quant=True,
+     bnb_4bit_compute_dtype=torch.float16
+ )
model = Idefics2ForConditionalGeneration.from_pretrained(
    "HuggingFaceM4/idefics2-8b",
+    torch_dtype=torch.float16,
+    quantization_config=quantization_config,
).to(device)
```

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Idefics2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

- A notebook on how to fine-tune Idefics2 on a custom dataset using the [Trainer](../main_classes/trainer.md) can be found [here](https://colab.research.google.com/drive/1NtcTgRbSBKN7pYD3Vdx1j9m8pt3fhFDB?usp=sharing). It supports both full fine-tuning as well as (quantized) LoRA.
- A script regarding how to fine-tune Idefics2 using the TRL library can be found [here](https://gist.github.com/edbeeching/228652fc6c2b29a1641be5a5778223cb).
- Demo notebook regarding fine-tuning Idefics2 for JSON extraction use cases can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Idefics2). 🌎

## Idefics2Config

[[autodoc]] Idefics2Config

## Idefics2Model

[[autodoc]] Idefics2Model
- forward

## Idefics2ForConditionalGeneration

[[autodoc]] Idefics2ForConditionalGeneration
- forward

## Idefics2ImageProcessor

[[autodoc]] Idefics2ImageProcessor
- preprocess

## Idefics2Processor

[[autodoc]] Idefics2Processor
- __call__
transformers/docs/source/en/model_doc/idefics2.md/0
{ "file_path": "transformers/docs/source/en/model_doc/idefics2.md", "repo_id": "transformers", "token_count": 3215 }
274
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# LLaMA

## Overview

The LLaMA model was proposed in [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. It is a collection of foundation language models ranging from 7B to 65B parameters.

The abstract from the paper is the following:

*We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community.*

This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama).

## Usage tips

- Weights for the LLaMA models can be obtained by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form)
- After downloading the weights, they will need to be converted to the Hugging Face Transformers format using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). The script can be called with the following (example) command:

```bash
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
    --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
```

- After conversion, the model and tokenizer can be loaded via:

```python
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("/output/path")
model = LlamaForCausalLM.from_pretrained("/output/path")
```

Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). For the 65B model, it's thus 130GB of RAM needed.

- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece).
  One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string.

The Flax version of the implementation was contributed by [afmck](https://huggingface.co/afmck) with the code in the implementation based on Hugging Face's Flax GPT-Neo.

Based on the original LLaMA model, Meta AI has released some follow-up works:

- **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pre-trained on 2 trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2).

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaMA. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

<PipelineTag pipeline="text-classification"/>

- A [notebook](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) on how to use prompt tuning to adapt the LLaMA model for text classification task. 🌎

<PipelineTag pipeline="question-answering"/>

- [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf), a blog post about how to train LLaMA to answer questions on [Stack Exchange](https://stackexchange.com/) with RLHF.

⚗️ Optimization
- A [notebook](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) on how to fine-tune LLaMA model using xturing library on GPU which has limited memory. 🌎

⚡️ Inference
- A [notebook](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) on how to run the LLaMA Model using PeftModel from the 🤗 PEFT library. 🌎
- A [notebook](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) on how to load a PEFT adapter LLaMA model with LangChain. 🌎

🚀 Deploy
- A [notebook](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) on how to fine-tune LLaMA model using LoRA method via the 🤗 PEFT library with intuitive UI. 🌎
- A [notebook](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) on how to deploy Open-LLaMA model for text generation on Amazon SageMaker.
🌎 ## LlamaConfig [[autodoc]] LlamaConfig ## LlamaTokenizer [[autodoc]] LlamaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LlamaTokenizerFast [[autodoc]] LlamaTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - update_post_processor - save_vocabulary ## LlamaModel [[autodoc]] LlamaModel - forward ## LlamaForCausalLM [[autodoc]] LlamaForCausalLM - forward ## LlamaForSequenceClassification [[autodoc]] LlamaForSequenceClassification - forward ## LlamaForQuestionAnswering [[autodoc]] LlamaForQuestionAnswering - forward ## LlamaForTokenClassification [[autodoc]] LlamaForTokenClassification - forward ## FlaxLlamaModel [[autodoc]] FlaxLlamaModel - __call__ ## FlaxLlamaForCausalLM [[autodoc]] FlaxLlamaForCausalLM - __call__
transformers/docs/source/en/model_doc/llama.md/0
{ "file_path": "transformers/docs/source/en/model_doc/llama.md", "repo_id": "transformers", "token_count": 2384 }
275
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Mask2Former

## Overview

The Mask2Former model was proposed in [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar. Mask2Former is a unified framework for panoptic, instance and semantic segmentation and features significant performance and efficiency improvements over [MaskFormer](maskformer).

The abstract from the paper is the following:

*Image segmentation groups pixels with different semantics, e.g., category or instance membership. Each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K).*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/mask2former_architecture.jpg" alt="drawing" width="600"/>

<small> Mask2Former architecture. Taken from the <a href="https://arxiv.org/abs/2112.01527">original paper.</a> </small>

This model was contributed by [Shivalika Singh](https://huggingface.co/shivi) and [Alara Dirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/facebookresearch/Mask2Former).

## Usage tips

- Mask2Former uses the same preprocessing and postprocessing steps as [MaskFormer](maskformer). Use [`Mask2FormerImageProcessor`] or [`AutoImageProcessor`] to prepare images and optional targets for the model.
- To get the final segmentation, depending on the task, you can call [`~Mask2FormerImageProcessor.post_process_semantic_segmentation`] or [`~Mask2FormerImageProcessor.post_process_instance_segmentation`] or [`~Mask2FormerImageProcessor.post_process_panoptic_segmentation`]. All three tasks can be solved using the [`Mask2FormerForUniversalSegmentation`] output; panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object(s) (e.g. sky) together.

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Mask2Former.
- Demo notebooks regarding inference + fine-tuning Mask2Former on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Mask2Former).
- Scripts for finetuning [`Mask2Former`] with [`Trainer`] or [Accelerate](https://huggingface.co/docs/accelerate/index) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/instance-segmentation).

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we will review it. The resource should ideally demonstrate something new instead of duplicating an existing resource.

## Mask2FormerConfig

[[autodoc]] Mask2FormerConfig

## Mask2Former specific outputs

[[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerModelOutput

[[autodoc]] models.mask2former.modeling_mask2former.Mask2FormerForUniversalSegmentationOutput

## Mask2FormerModel

[[autodoc]] Mask2FormerModel
- forward

## Mask2FormerForUniversalSegmentation

[[autodoc]] Mask2FormerForUniversalSegmentation
- forward

## Mask2FormerImageProcessor

[[autodoc]] Mask2FormerImageProcessor
- preprocess
- encode_inputs
- post_process_semantic_segmentation
- post_process_instance_segmentation
- post_process_panoptic_segmentation
transformers/docs/source/en/model_doc/mask2former.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mask2former.md", "repo_id": "transformers", "token_count": 1301 }
276
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# PhoBERT

## Overview

The PhoBERT model was proposed in [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92.pdf) by Dat Quoc Nguyen, Anh Tuan Nguyen.

The abstract from the paper is the following:

*We present PhoBERT with two versions, PhoBERT-base and PhoBERT-large, the first public large-scale monolingual language models pre-trained for Vietnamese. Experimental results show that PhoBERT consistently outperforms the recent best pre-trained multilingual model XLM-R (Conneau et al., 2020) and improves the state-of-the-art in multiple Vietnamese-specific NLP tasks including Part-of-speech tagging, Dependency parsing, Named-entity recognition and Natural language inference.*

This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/PhoBERT).

## Usage example

```python
>>> import torch
>>> from transformers import AutoModel, AutoTokenizer

>>> phobert = AutoModel.from_pretrained("vinai/phobert-base")
>>> tokenizer = AutoTokenizer.from_pretrained("vinai/phobert-base")

>>> # INPUT TEXT MUST BE ALREADY WORD-SEGMENTED!
>>> line = "Tôi là sinh_viên trường đại_học Công_nghệ ."

>>> input_ids = torch.tensor([tokenizer.encode(line)])

>>> with torch.no_grad():
...     features = phobert(input_ids)  # Model outputs are now tuples

>>> # With TensorFlow 2.0+:
>>> # from transformers import TFAutoModel
>>> # phobert = TFAutoModel.from_pretrained("vinai/phobert-base")
```

<Tip>

The PhoBERT implementation is the same as BERT, except for tokenization. Refer to the [BERT documentation](bert) for information on configuration classes and their parameters. The PhoBERT-specific tokenizer is documented below.

</Tip>

## PhobertTokenizer

[[autodoc]] PhobertTokenizer
transformers/docs/source/en/model_doc/phobert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/phobert.md", "repo_id": "transformers", "token_count": 776 }
277
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Reformer

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=reformer">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-reformer-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/reformer-crime-and-punishment">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The Reformer model was proposed in the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451.pdf) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.

The abstract from the paper is the following:

*Large Transformer models routinely achieve state-of-the-art results on a number of tasks but training these models can be prohibitively costly, especially on long sequences. We introduce two techniques to improve the efficiency of Transformers. For one, we replace dot-product attention by one that uses locality-sensitive hashing, changing its complexity from O(L^2) to O(Llog(L)), where L is the length of the sequence. Furthermore, we use reversible residual layers instead of the standard residuals, which allows storing activations only once in the training process instead of N times, where N is the number of layers. The resulting model, the Reformer, performs on par with Transformer models while being much more memory-efficient and much faster on long sequences.*

This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be found [here](https://github.com/google/trax/tree/master/trax/models/reformer).

## Usage tips

- Reformer does **not** work with *torch.nn.DataParallel* due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035).
- Use Axial position encoding (see below for more details). It’s a mechanism to avoid having a huge positional encoding matrix (when the sequence length is very big) by factorizing it into smaller matrices.
- Replace traditional attention by LSH (locality-sensitive hashing) attention (see below for more details). It’s a technique to avoid computing the full product query-key in the attention layers.
- Avoid storing the intermediate results of each layer by using reversible transformer layers to obtain them during the backward pass (subtracting the residuals from the input of the next layer gives them back) or recomputing them for results inside a given layer (less efficient than storing them but saves memory).
- Compute the feedforward operations by chunks and not on the whole batch (all of these options are illustrated in the configuration sketch below).
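Each of these techniques is exposed through [`ReformerConfig`]. As a quick orientation before the detailed sections below, here is a minimal configuration sketch; the values are illustrative only, not recommended settings:

```python
from transformers import ReformerConfig, ReformerModel

# Illustrative values only; the sections below explain how to choose them.
config = ReformerConfig(
    attn_layers=["lsh", "local", "lsh", "local"],  # alternate LSH and local self-attention layers
    axial_pos_embds=True,
    axial_pos_shape=(64, 64),  # product must equal config.max_position_embeddings (here 64 * 64 = 4096)
    axial_pos_embds_dim=(64, 192),  # sum must equal config.hidden_size (here 64 + 192 = 256)
    lsh_attn_chunk_length=64,
    local_attn_chunk_length=64,
    num_buckets=None,  # let the model pick a good value based on the sequence length
)
model = ReformerModel(config)
```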
### Axial Positional Encodings

Axial Positional Encodings were first implemented in Google's [trax library](https://github.com/google/trax/blob/4d99ad4965bab1deba227539758d59f0df0fef48/trax/layers/research/position_encodings.py#L29) and developed by the authors of this model's paper. In models that treat very long input sequences, the conventional position id encodings store an embedding vector of size \\(d\\), the `config.hidden_size`, for every position \\(1, \ldots, n_s\\), with \\(n_s\\) being `config.max_position_embeddings`. This means that having a sequence length of \\(n_s = 2^{19} \approx 0.5M\\) and a `config.hidden_size` of \\(d = 2^{10} \approx 1000\\) would result in a position encoding matrix:

$$X_{i,j}, \text{ with } i \in \left[1,\ldots, d\right] \text{ and } j \in \left[1,\ldots, n_s\right]$$

which alone has over 500M parameters to store. Axial positional encodings factorize \\(X_{i,j}\\) into two matrices:

$$X^{1}_{i,j}, \text{ with } i \in \left[1,\ldots, d^1\right] \text{ and } j \in \left[1,\ldots, n_s^1\right]$$

and

$$X^{2}_{i,j}, \text{ with } i \in \left[1,\ldots, d^2\right] \text{ and } j \in \left[1,\ldots, n_s^2\right]$$

with:

$$d = d^1 + d^2 \text{ and } n_s = n_s^1 \times n_s^2 .$$

Therefore the following holds:

$$X_{i,j} = \begin{cases}
X^{1}_{i, k}, & \text{if }\ i < d^1 \text{ with } k = j \mod n_s^1 \\
X^{2}_{i - d^1, l}, & \text{if } i \ge d^1 \text{ with } l = \lfloor\frac{j}{n_s^1}\rfloor
\end{cases}$$

Intuitively, this means that a position embedding vector \\(x_j \in \mathbb{R}^{d}\\) is now the concatenation of two factorized embedding vectors: \\(x^1_{k}\\) and \\(x^2_{l}\\), with \\(k = j \mod n_s^1\\) and \\(l = \lfloor\frac{j}{n_s^1}\rfloor\\), so that the `config.max_position_embeddings` dimension \\(j\\) is factorized into \\(k \text{ and } l\\). This design ensures that each position embedding vector \\(x_j\\) is unique.

Using the above example again, axial position encoding with \\(d^1 = 2^9, d^2 = 2^9, n_s^1 = 2^9, n_s^2 = 2^{10}\\) can drastically reduce the number of parameters from 500 000 000 to \\(2^{18} + 2^{19} \approx 780 000\\) parameters, a memory reduction of more than 99%.

In practice, the parameter `config.axial_pos_embds_dim` is set to a tuple \\((d^1, d^2)\\) whose sum has to equal `config.hidden_size`, and `config.axial_pos_shape` is set to a tuple \\((n_s^1, n_s^2)\\) whose product has to equal `config.max_position_embeddings`, which during training has to be equal to the *sequence length* of the `input_ids`.

### LSH Self Attention

In Locality sensitive hashing (LSH) self attention the key and query projection weights are tied. Therefore, the key query embedding vectors are also tied. LSH self attention uses the locality sensitive hashing mechanism proposed in [Practical and Optimal LSH for Angular Distance](https://arxiv.org/abs/1509.02897) to assign each of the tied key query embedding vectors to one of `config.num_buckets` possible buckets. The premise is that the more "similar" key query embedding vectors (in terms of *cosine similarity*) are to each other, the more likely they are assigned to the same bucket.

The accuracy of the LSH mechanism can be improved by increasing `config.num_hashes` or directly the argument `num_hashes` of the forward function so that the output of the LSH self attention better approximates the output of the "normal" full self attention.

The buckets are then sorted and chunked into query key embedding vector chunks each of length `config.lsh_attn_chunk_length`.
For each chunk, the query embedding vectors attend to the key vectors of their own chunk (which, because the key and query projections are tied, are identical to the query vectors) and to the key embedding vectors of the `config.lsh_num_chunks_before` previous neighboring chunks and the `config.lsh_num_chunks_after` following neighboring chunks.

For more information, see the [original Paper](https://arxiv.org/abs/2001.04451) or this great [blog post](https://www.pragmatic.ml/reformer-deep-dive/).

Note that `config.num_buckets` can also be factorized into a list \\((n_{\text{buckets}}^1, n_{\text{buckets}}^2)\\). This way instead of assigning the query key embedding vectors to one of \\((1,\ldots, n_{\text{buckets}})\\) they are assigned to one of \\((1-1,\ldots, n_{\text{buckets}}^1-1, \ldots, 1-n_{\text{buckets}}^2, \ldots, n_{\text{buckets}}^1-n_{\text{buckets}}^2)\\). This is crucial for very long sequences to save memory.

When training a model from scratch, it is recommended to leave `config.num_buckets=None`, so that depending on the sequence length a good value for `num_buckets` is calculated on the fly. This value will then automatically be saved in the config and should be reused for inference.

Using LSH self attention, the memory and time complexity of the query-key matmul operation can be reduced from \\(\mathcal{O}(n_s \times n_s)\\) to \\(\mathcal{O}(n_s \times \log(n_s))\\), which usually represents the memory and time bottleneck in a transformer model, with \\(n_s\\) being the sequence length.

### Local Self Attention

Local self attention is essentially a "normal" self attention layer with key, query and value projections, but is chunked so that in each chunk of length `config.local_attn_chunk_length` the query embedding vectors attend only to the key embedding vectors in their chunk and to the key embedding vectors of `config.local_num_chunks_before` previous neighboring chunks and `config.local_num_chunks_after` following neighboring chunks.

Using Local self attention, the memory and time complexity of the query-key matmul operation can be reduced from \\(\mathcal{O}(n_s \times n_s)\\) to \\(\mathcal{O}(n_s \times c)\\), with \\(n_s\\) being the sequence length and \\(c\\) the fixed chunk length, which usually represents the memory and time bottleneck in a transformer model.

### Training

During training, we must ensure that the sequence length is set to a value that is divisible by the least common multiple of `config.lsh_attn_chunk_length` and `config.local_attn_chunk_length` (a small padding helper is sketched below) and that the parameters of the Axial Positional Encodings are correctly set as described above. Reformer is very memory efficient, so the model can easily be trained on sequences as long as 64000 tokens.
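For example, inputs can be right-padded up to a valid length with a small helper along these lines; this is a sketch only, and how you handle the padding token and attention mask depends on your tokenizer:

```python
import math

import torch


def pad_to_valid_length(input_ids: torch.Tensor, config, pad_token_id: int) -> torch.Tensor:
    """Right-pad `input_ids` so the sequence length is divisible by both chunk lengths."""
    # math.lcm requires Python >= 3.9
    multiple = math.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length)
    seq_len = input_ids.shape[-1]
    padded_len = math.ceil(seq_len / multiple) * multiple
    if padded_len == seq_len:
        return input_ids
    padding = input_ids.new_full((*input_ids.shape[:-1], padded_len - seq_len), pad_token_id)
    return torch.cat([input_ids, padding], dim=-1)
```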
For training, the [`ReformerModelWithLMHead`] should be used as follows (loading a pretrained checkpoint and its tokenizer first, so that the snippet is runnable):

```python
from transformers import ReformerModelWithLMHead, ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
model = ReformerModelWithLMHead.from_pretrained("google/reformer-crime-and-punishment")

input_ids = tokenizer.encode("This is a sentence from the training data", return_tensors="pt")
loss = model(input_ids, labels=input_ids)[0]
```

## Resources

- [Text classification task guide](../tasks/sequence_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)

## ReformerConfig

[[autodoc]] ReformerConfig

## ReformerTokenizer

[[autodoc]] ReformerTokenizer
    - save_vocabulary

## ReformerTokenizerFast

[[autodoc]] ReformerTokenizerFast

## ReformerModel

[[autodoc]] ReformerModel
    - forward

## ReformerModelWithLMHead

[[autodoc]] ReformerModelWithLMHead
    - forward

## ReformerForMaskedLM

[[autodoc]] ReformerForMaskedLM
    - forward

## ReformerForSequenceClassification

[[autodoc]] ReformerForSequenceClassification
    - forward

## ReformerForQuestionAnswering

[[autodoc]] ReformerForQuestionAnswering
    - forward
transformers/docs/source/en/model_doc/reformer.md/0
{ "file_path": "transformers/docs/source/en/model_doc/reformer.md", "repo_id": "transformers", "token_count": 3186 }
278
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# SEW-D

## Overview

SEW-D (Squeezed and Efficient Wav2Vec with Disentangled attention) was proposed in [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, and Yoav Artzi.

The abstract from the paper is the following:

*This paper is a study of performance-efficiency trade-offs in pre-trained models for automatic speech recognition (ASR). We focus on wav2vec 2.0, and formalize several architecture designs that influence both the model performance and its efficiency. Putting together all our observations, we introduce SEW (Squeezed and Efficient Wav2vec), a pre-trained model architecture with significant improvements along both performance and efficiency dimensions across a variety of training setups. For example, under the 100h-960h semi-supervised setup on LibriSpeech, SEW achieves a 1.9x inference speedup compared to wav2vec 2.0, with a 13.5% relative reduction in word error rate. With a similar inference time, SEW reduces word error rate by 25-50% across different model sizes.*

This model was contributed by [anton-l](https://huggingface.co/anton-l).

## Usage tips

- SEW-D is a speech model that accepts a float array corresponding to the raw waveform of the speech signal.
- [`SEWDForCTC`] is fine-tuned using connectionist temporal classification (CTC), so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`] (a short inference sketch is included at the end of this page).

## Resources

- [Audio classification task guide](../tasks/audio_classification)
- [Automatic speech recognition task guide](../tasks/asr)

## SEWDConfig

[[autodoc]] SEWDConfig

## SEWDModel

[[autodoc]] SEWDModel
    - forward

## SEWDForCTC

[[autodoc]] SEWDForCTC
    - forward

## SEWDForSequenceClassification

[[autodoc]] SEWDForSequenceClassification
    - forward
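As a quick illustration of the CTC decoding flow described in the usage tips, here is a minimal inference sketch; the checkpoint name and the silent dummy waveform are illustrative only:

```python
import torch
from transformers import AutoProcessor, SEWDForCTC

processor = AutoProcessor.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h")
model = SEWDForCTC.from_pretrained("asapp/sew-d-tiny-100k-ft-ls100h")

# One second of silence at 16 kHz stands in for a real speech waveform
raw_audio = torch.zeros(16000).numpy()
inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding: take the most likely token per frame, then collapse repeats/blanks
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
```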
transformers/docs/source/en/model_doc/sew-d.md/0
{ "file_path": "transformers/docs/source/en/model_doc/sew-d.md", "repo_id": "transformers", "token_count": 732 }
279
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# UniSpeech-SAT

## Overview

The UniSpeech-SAT model was proposed in [UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, and Xiangzhan Yu.

The abstract from the paper is the following:

*Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years witness great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply the multi-task learning to the current SSL framework, where we integrate the utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisedly and incorporate during training. We integrate the proposed methods into the HuBERT framework. Experiment results on SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.*

This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT).

## Usage tips

- UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction.
- The UniSpeechSat model can be fine-tuned using connectionist temporal classification (CTC), so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`] (a short sketch follows below).
- UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks.
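As a quick illustration of the tips above, here is a minimal CTC inference sketch; the checkpoint name and the silent dummy waveform are illustrative only:

```python
import torch
from transformers import AutoProcessor, UniSpeechSatForCTC

processor = AutoProcessor.from_pretrained("microsoft/unispeech-sat-base-100h-libri-ft")
model = UniSpeechSatForCTC.from_pretrained("microsoft/unispeech-sat-base-100h-libri-ft")

# One second of silence at 16 kHz stands in for a real speech waveform
raw_audio = torch.zeros(16000).numpy()
inputs = processor(raw_audio, sampling_rate=16000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding of the per-frame token logits
predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
```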
## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## UniSpeechSatConfig [[autodoc]] UniSpeechSatConfig ## UniSpeechSat specific outputs [[autodoc]] models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput ## UniSpeechSatModel [[autodoc]] UniSpeechSatModel - forward ## UniSpeechSatForCTC [[autodoc]] UniSpeechSatForCTC - forward ## UniSpeechSatForSequenceClassification [[autodoc]] UniSpeechSatForSequenceClassification - forward ## UniSpeechSatForAudioFrameClassification [[autodoc]] UniSpeechSatForAudioFrameClassification - forward ## UniSpeechSatForXVector [[autodoc]] UniSpeechSatForXVector - forward ## UniSpeechSatForPreTraining [[autodoc]] UniSpeechSatForPreTraining - forward
transformers/docs/source/en/model_doc/unispeech-sat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/unispeech-sat.md", "repo_id": "transformers", "token_count": 1045 }
280
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

-->

# ViTDet

## Overview

The ViTDet model was proposed in [Exploring Plain Vision Transformer Backbones for Object Detection](https://arxiv.org/abs/2203.16527) by Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He. VitDet leverages the plain [Vision Transformer](vit) for the task of object detection.

The abstract from the paper is the following:

*We explore the plain, non-hierarchical Vision Transformer (ViT) as a backbone network for object detection. This design enables the original ViT architecture to be fine-tuned for object detection without needing to redesign a hierarchical backbone for pre-training. With minimal adaptations for fine-tuning, our plain-backbone detector can achieve competitive results. Surprisingly, we observe: (i) it is sufficient to build a simple feature pyramid from a single-scale feature map (without the common FPN design) and (ii) it is sufficient to use window attention (without shifting) aided with very few cross-window propagation blocks. With plain ViT backbones pre-trained as Masked Autoencoders (MAE), our detector, named ViTDet, can compete with the previous leading methods that were all based on hierarchical backbones, reaching up to 61.3 AP_box on the COCO dataset using only ImageNet-1K pre-training. We hope our study will draw attention to research on plain-backbone detectors.*

This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detectron2/tree/main/projects/ViTDet).

Tips:

- At the moment, only the backbone is available (a short usage sketch is included at the end of this page).

## VitDetConfig

[[autodoc]] VitDetConfig

## VitDetModel

[[autodoc]] VitDetModel
    - forward
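Since only the backbone is available, a typical use is to extract feature maps that a detection head can consume. A minimal sketch with a randomly initialized model (no pretrained checkpoint is assumed here):

```python
import torch
from transformers import VitDetConfig, VitDetModel

# Randomly initialized backbone, for illustration only
config = VitDetConfig()
model = VitDetModel(config)

# A dummy batch of one 224x224 RGB image
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)

# The backbone returns a spatial feature map (batch, channels, height, width)
print(outputs.last_hidden_state.shape)
```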
transformers/docs/source/en/model_doc/vitdet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vitdet.md", "repo_id": "transformers", "token_count": 579 }
281
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# XLM

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=xlm">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-xlm-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/xlm-mlm-en-2048">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The XLM model was proposed in [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau. It's a transformer pretrained using one of the following objectives:

- a causal language modeling (CLM) objective (next token prediction),
- a masked language modeling (MLM) objective (BERT-like), or
- a Translation Language Modeling (TLM) objective (an extension of BERT's MLM to multiple language inputs)

The abstract from the paper is the following:

*Recent studies have demonstrated the efficiency of generative pretraining for English natural language understanding. In this work, we extend this approach to multiple languages and show the effectiveness of cross-lingual pretraining. We propose two methods to learn cross-lingual language models (XLMs): one unsupervised that only relies on monolingual data, and one supervised that leverages parallel data with a new cross-lingual language model objective. We obtain state-of-the-art results on cross-lingual classification, unsupervised and supervised machine translation. On XNLI, our approach pushes the state of the art by an absolute gain of 4.9% accuracy. On unsupervised machine translation, we obtain 34.3 BLEU on WMT'16 German-English, improving the previous state of the art by more than 9 BLEU. On supervised machine translation, we obtain a new state of the art of 38.5 BLEU on WMT'16 Romanian-English, outperforming the previous best approach by more than 4 BLEU. Our code and pretrained models will be made publicly available.*

This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/facebookresearch/XLM/).

## Usage tips

- XLM has many different checkpoints, which were trained using different objectives: CLM, MLM or TLM. Make sure to select the correct objective for your task (e.g. MLM checkpoints are not suitable for generation).
- XLM has multilingual checkpoints which leverage a specific `lang` parameter. Check out the [multi-lingual](../multilingual) page for more information.
- A transformer model trained on several languages. There are three different types of training for this model and the library provides checkpoints for all of them:

    * Causal language modeling (CLM) which is the traditional autoregressive training (so this model could be in the previous section as well).
      One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens that may span several documents in one of those languages.
    * Masked language modeling (MLM) which is like RoBERTa. One of the languages is selected for each training sample, and the model input is a sentence of 256 tokens that may span several documents in one of those languages, with dynamic masking of the tokens.
    * A combination of MLM and translation language modeling (TLM). This consists of concatenating a sentence in two different languages, with random masking. To predict one of the masked tokens, the model can use both the surrounding context in language 1 and the context given by language 2.

## Resources

- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)

## XLMConfig

[[autodoc]] XLMConfig

## XLMTokenizer

[[autodoc]] XLMTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## XLM specific outputs

[[autodoc]] models.xlm.modeling_xlm.XLMForQuestionAnsweringOutput

<frameworkcontent>
<pt>

## XLMModel

[[autodoc]] XLMModel
    - forward

## XLMWithLMHeadModel

[[autodoc]] XLMWithLMHeadModel
    - forward

## XLMForSequenceClassification

[[autodoc]] XLMForSequenceClassification
    - forward

## XLMForMultipleChoice

[[autodoc]] XLMForMultipleChoice
    - forward

## XLMForTokenClassification

[[autodoc]] XLMForTokenClassification
    - forward

## XLMForQuestionAnsweringSimple

[[autodoc]] XLMForQuestionAnsweringSimple
    - forward

## XLMForQuestionAnswering

[[autodoc]] XLMForQuestionAnswering
    - forward

</pt>
<tf>

## TFXLMModel

[[autodoc]] TFXLMModel
    - call

## TFXLMWithLMHeadModel

[[autodoc]] TFXLMWithLMHeadModel
    - call

## TFXLMForSequenceClassification

[[autodoc]] TFXLMForSequenceClassification
    - call

## TFXLMForMultipleChoice

[[autodoc]] TFXLMForMultipleChoice
    - call

## TFXLMForTokenClassification

[[autodoc]] TFXLMForTokenClassification
    - call

## TFXLMForQuestionAnsweringSimple

[[autodoc]] TFXLMForQuestionAnsweringSimple
    - call

</tf>
</frameworkcontent>
transformers/docs/source/en/model_doc/xlm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm.md", "repo_id": "transformers", "token_count": 1744 }
282
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# CPU inference

With some optimizations, it is possible to efficiently run large model inference on a CPU. One of these optimization techniques involves compiling the PyTorch code into an intermediate format for high-performance environments like C++. The other technique fuses multiple operations into one kernel to reduce the overhead of running each operation separately.

You'll learn how to use [BetterTransformer](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/) for faster inference, and how to convert your PyTorch code to [TorchScript](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html). If you're using an Intel CPU, you can also use [graph optimizations](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features.html#graph-optimization) from [Intel Extension for PyTorch](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/index.html) to boost inference speed even more. Finally, learn how to use 🤗 Optimum to accelerate inference with ONNX Runtime or OpenVINO (if you're using an Intel CPU).

## BetterTransformer

BetterTransformer accelerates inference with its fastpath (native PyTorch specialized implementation of Transformer functions) execution. The two optimizations in the fastpath execution are:

1. fusion, which combines multiple sequential operations into a single "kernel" to reduce the number of computation steps
2. skipping the inherent sparsity of padding tokens to avoid unnecessary computation with nested tensors

BetterTransformer also converts all attention operations to use the more memory-efficient [scaled dot product attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention).

<Tip>

BetterTransformer is not supported for all models. Check this [list](https://huggingface.co/docs/optimum/bettertransformer/overview#supported-models) to see if a model supports BetterTransformer.

</Tip>

Before you start, make sure you have 🤗 Optimum [installed](https://huggingface.co/docs/optimum/installation).

Enable BetterTransformer with the [`PreTrainedModel.to_bettertransformer`] method:

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("bigcode/starcoder")
model.to_bettertransformer()
```

## TorchScript

TorchScript is an intermediate PyTorch model representation that can be run in production environments where performance is important. You can train a model in PyTorch and then export it to TorchScript to free the model from Python performance constraints. PyTorch [traces](https://pytorch.org/docs/stable/generated/torch.jit.trace.html) a model to return a [`ScriptFunction`] that is optimized with just-in-time compilation (JIT).
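As a rough sketch of what tracing a 🤗 Transformers model looks like outside of [`Trainer`] (the checkpoint and example sentence below are illustrative; `torchscript=True` is needed so the model returns tuples that `jit.trace` can handle):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# torchscript=True configures the model to return tuples instead of dict outputs
model = AutoModelForSequenceClassification.from_pretrained(model_id, torchscript=True)
model.eval()

inputs = tokenizer("TorchScript can speed up CPU inference.", return_tensors="pt")
# Trace with example inputs; the traced module can be saved and reloaded without Python overhead
traced_model = torch.jit.trace(model, (inputs["input_ids"], inputs["attention_mask"]))
torch.jit.save(traced_model, "traced_model.pt")  # reload later with torch.jit.load
```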
Compared to the default eager mode, JIT mode in PyTorch typically yields better performance for inference using optimization techniques like operator fusion. For a gentle introduction to TorchScript, see the [Introduction to PyTorch TorchScript](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html) tutorial.

With the [`Trainer`] class, you can enable JIT mode for CPU inference by setting the `--jit_mode_eval` flag:

```bash
python run_qa.py \
--model_name_or_path csarron/bert-base-uncased-squad-v1 \
--dataset_name squad \
--do_eval \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/ \
--no_cuda \
--jit_mode_eval
```

<Tip warning={true}>

For PyTorch >= 1.14.0, JIT-mode could benefit any model for prediction and evaluation since the dict input is supported in `jit.trace`.

For PyTorch < 1.14.0, JIT-mode could benefit a model if its forward parameter order matches the tuple input order in `jit.trace`, such as a question-answering model. If the forward parameter order does not match the tuple input order in `jit.trace`, like a text classification model, `jit.trace` will fail, and we catch the resulting exception to make the model fall back to eager mode. Logging is used to notify users.

</Tip>

## IPEX graph optimization

Intel® Extension for PyTorch (IPEX) provides further optimizations in JIT mode for Intel CPUs, and we recommend combining it with TorchScript for even faster performance. The IPEX [graph optimization](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html) fuses operations like Multi-head attention, Concat Linear, Linear + Add, Linear + Gelu, Add + LayerNorm, and more.

To take advantage of these graph optimizations, make sure you have IPEX [installed](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html):

```bash
pip install intel_extension_for_pytorch
```

Set the `--use_ipex` and `--jit_mode_eval` flags in the [`Trainer`] class to enable JIT mode with the graph optimizations:

```bash
python run_qa.py \
--model_name_or_path csarron/bert-base-uncased-squad-v1 \
--dataset_name squad \
--do_eval \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/ \
--no_cuda \
--use_ipex \
--jit_mode_eval
```

## 🤗 Optimum

<Tip>

Learn more details about using ORT with 🤗 Optimum in the [Optimum Inference with ONNX Runtime](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/models) guide. This section only provides a brief and simple example.

</Tip>

ONNX Runtime (ORT) is a model accelerator that runs inference on CPUs by default. ORT is supported by 🤗 Optimum, which can be used in 🤗 Transformers without making too many changes to your code. You only need to replace the 🤗 Transformers `AutoClass` with its equivalent [`~optimum.onnxruntime.ORTModel`] for the task you're solving, and load a checkpoint in the ONNX format.

For example, if you're running inference on a question answering task, load the [optimum/roberta-base-squad2](https://huggingface.co/optimum/roberta-base-squad2) checkpoint which contains a `model.onnx` file:

```py
from transformers import AutoTokenizer, pipeline
from optimum.onnxruntime import ORTModelForQuestionAnswering

model = ORTModelForQuestionAnswering.from_pretrained("optimum/roberta-base-squad2")
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")

onnx_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)

question = "What's my name?"
context = "My name is Philipp and I live in Nuremberg."
pred = onnx_qa(question, context) ``` If you have an Intel CPU, take a look at 🤗 [Optimum Intel](https://huggingface.co/docs/optimum/intel/index) which supports a variety of compression techniques (quantization, pruning, knowledge distillation) and tools for converting models to the [OpenVINO](https://huggingface.co/docs/optimum/intel/inference) format for higher performance inference.
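For example, the same question answering pipeline can run on OpenVINO with a one-line model swap. A sketch assuming `optimum-intel` is installed with the OpenVINO extra (`pip install optimum[openvino]`):

```python
from transformers import AutoTokenizer, pipeline
from optimum.intel import OVModelForQuestionAnswering

# export=True converts the PyTorch checkpoint to the OpenVINO IR format on the fly
model = OVModelForQuestionAnswering.from_pretrained("deepset/roberta-base-squad2", export=True)
tokenizer = AutoTokenizer.from_pretrained("deepset/roberta-base-squad2")

ov_qa = pipeline("question-answering", model=model, tokenizer=tokenizer)
pred = ov_qa(question="What's my name?", context="My name is Philipp and I live in Nuremberg.")
```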
transformers/docs/source/en/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/en/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 2080 }
283
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# AQLM

> [!TIP]
> Try AQLM on [Google Colab](https://colab.research.google.com/drive/1-xZmBRXT5Fm3Ghn4Mwa2KRypORXb855X?usp=sharing)!

Additive Quantization of Language Models ([AQLM](https://arxiv.org/abs/2401.06118)) is a compression method for Large Language Models. It quantizes multiple weights together, taking advantage of the interdependencies between them. AQLM represents groups of 8-16 weights as a sum of multiple vector codes.

Inference support for AQLM is implemented in the `aqlm` library. Make sure to install it to run the models (note that `aqlm` works only with Python >= 3.10):

```bash
pip install aqlm[gpu,cpu]
```

The library provides efficient kernels for both GPU and CPU inference and training.

The instructions on how to quantize models yourself, as well as all the relevant code, can be found in the corresponding GitHub [repository](https://github.com/Vahe1994/AQLM).

To run AQLM models, simply load a model that has been quantized with AQLM:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

quantized_model = AutoModelForCausalLM.from_pretrained(
    "ISTA-DASLab/Mixtral-8x7b-AQLM-2Bit-1x16-hf",
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained("ISTA-DASLab/Mixtral-8x7b-AQLM-2Bit-1x16-hf")
```

## PEFT

Starting with version `aqlm 1.0.2`, AQLM supports Parameter-Efficient Fine-Tuning in the form of [LoRA](https://huggingface.co/docs/peft/package_reference/lora) integrated into the [PEFT](https://huggingface.co/blog/peft) library.

## AQLM configurations

AQLM quantization setups vary mainly in the number of codebooks used as well as the codebook sizes in bits. The most popular setups, as well as the inference kernels they support, are:

| Kernel | Number of codebooks | Codebook size, bits | Notation | Accuracy | Speedup     | Fast GPU inference | Fast CPU inference |
|---|---------------------|---------------------|----------|-------------|-------------|--------------------|--------------------|
| Triton | K                   | N                  | KxN     | -        | Up to ~0.7x | ✅                  | ❌                   |
| CUDA | 1                   | 16                  | 1x16     | Best        | Up to ~1.3x | ✅                  | ❌                   |
| CUDA | 2                   | 8                   | 2x8      | OK          | Up to ~3.0x | ✅                  | ❌                   |
| Numba | K                   | 8                   | Kx8      | Good        | Up to ~4.0x | ❌                  | ✅                   |
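Once loaded, an AQLM-quantized model behaves like any other `transformers` model. Continuing from the loading snippet above, a short generation sketch (the prompt is arbitrary):

```python
# Uses `quantized_model` and `tokenizer` from the loading example above
inputs = tokenizer("The inventor of the telephone was", return_tensors="pt").to(quantized_model.device)
outputs = quantized_model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```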
transformers/docs/source/en/quantization/aqlm.md/0
{ "file_path": "transformers/docs/source/en/quantization/aqlm.md", "repo_id": "transformers", "token_count": 1243 }
284
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# What 🤗 Transformers can do

🤗 Transformers is a library of pretrained state-of-the-art models for natural language processing (NLP), computer vision, and audio and speech processing tasks. Not only does the library contain Transformer models, but it also has non-Transformer models like modern convolutional networks for computer vision tasks. If you look at some of the most popular consumer products today, like smartphones, apps, and televisions, odds are that some kind of deep learning technology is behind it. Want to remove a background object from a picture taken by your smartphone? This is an example of a panoptic segmentation task (don't worry if you don't know what this means yet, we'll describe it in the following sections!).

This page provides an overview of the different speech and audio, computer vision, and NLP tasks that can be solved with the 🤗 Transformers library in just three lines of code!

## Audio

Audio and speech processing tasks are a little different from the other modalities mainly because audio as an input is a continuous signal. Unlike text, a raw audio waveform can't be neatly split into discrete chunks the way a sentence can be divided into words. To get around this, the raw audio signal is typically sampled at regular intervals. If you take more samples within an interval, the sampling rate is higher, and the audio more closely resembles the original audio source.

Previous approaches preprocessed the audio to extract useful features from it. It is now more common to start audio and speech processing tasks by directly feeding the raw audio waveform to a feature encoder to extract an audio representation. This simplifies the preprocessing step and allows the model to learn the most essential features.

### Audio classification

Audio classification is a task that labels audio data from a predefined set of classes.
It is a broad category with many specific applications, some of which include:

* acoustic scene classification: label audio with a scene label ("office", "beach", "stadium")
* acoustic event detection: label audio with a sound event label ("car horn", "whale calling", "glass breaking")
* tagging: label audio containing multiple sounds (birdsongs, speaker identification in a meeting)
* music classification: label music with a genre label ("metal", "hip-hop", "country")

```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="audio-classification", model="superb/hubert-base-superb-er")
>>> preds = classifier("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4532, 'label': 'hap'},
 {'score': 0.3622, 'label': 'sad'},
 {'score': 0.0943, 'label': 'neu'},
 {'score': 0.0903, 'label': 'ang'}]
```

### Automatic speech recognition

Automatic speech recognition (ASR) transcribes speech into text. It is one of the most common audio tasks due partly to speech being such a natural form of human communication. Today, ASR systems are embedded in "smart" technology products like speakers, phones, and cars. We can ask our virtual assistants to play music, set reminders, and tell us the weather.

But one of the key challenges Transformer architectures have helped with is in low-resource languages. By pretraining on large amounts of speech data, finetuning the model on only one hour of labeled speech data in a low-resource language can still produce high-quality results compared to previous ASR systems trained on 100x more labeled data.

```py
>>> from transformers import pipeline

>>> transcriber = pipeline(task="automatic-speech-recognition", model="openai/whisper-small")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```

## Computer vision

One of the first and earliest successful computer vision tasks was recognizing images of zip code numbers using a [convolutional neural network (CNN)](glossary#convolution). An image is composed of pixels, and each pixel has a numerical value. This makes it easy to represent an image as a matrix of pixel values. Each particular combination of pixel values describes the colors of an image.

Two general ways computer vision tasks can be solved are:

1. Use convolutions to learn the hierarchical features of an image from low-level features to high-level abstract things.
2. Split an image into patches and use a Transformer to gradually learn how the image patches relate to each other to form an image. Unlike the bottom-up approach favored by a CNN, this is kind of like starting out with a blurry image and then gradually bringing it into focus.

### Image classification

Image classification labels an entire image from a predefined set of classes.
Like most classification tasks, there are many practical use cases for image classification, some of which include:

* healthcare: label medical images to detect disease or monitor patient health
* environment: label satellite images to monitor deforestation, inform wildland management or detect wildfires
* agriculture: label images of crops to monitor plant health or satellite images for land use monitoring
* ecology: label images of animal or plant species to monitor wildlife populations or track endangered species

```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="image-classification")
>>> preds = classifier(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> print(*preds, sep="\n")
{'score': 0.4335, 'label': 'lynx, catamount'}
{'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}
{'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}
{'score': 0.0239, 'label': 'Egyptian cat'}
{'score': 0.0229, 'label': 'tiger cat'}
```

### Object detection

Unlike image classification, object detection identifies multiple objects within an image and the objects' positions in an image (defined by the bounding box).

Some example applications of object detection include:

* self-driving vehicles: detect everyday traffic objects such as other vehicles, pedestrians, and traffic lights
* remote sensing: disaster monitoring, urban planning, and weather forecasting
* defect detection: detect cracks or structural damage in buildings, and manufacturing defects

```py
>>> from transformers import pipeline

>>> detector = pipeline(task="object-detection")
>>> preds = detector(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"], "box": pred["box"]} for pred in preds]
>>> preds
[{'score': 0.9865, 'label': 'cat', 'box': {'xmin': 178, 'ymin': 154, 'xmax': 882, 'ymax': 598}}]
```

### Image segmentation

Image segmentation is a pixel-level task that assigns every pixel in an image to a class. It differs from object detection, which uses bounding boxes to label and predict objects in an image, because segmentation is more granular. Segmentation can detect objects at a pixel level. There are several types of image segmentation:

* semantic segmentation: label every pixel of an image with a class, without distinguishing between separate instances of the same class
* instance segmentation: in addition to labeling the class of an object, it also labels each distinct instance of an object ("dog-1", "dog-2")
* panoptic segmentation: a combination of semantic and instance segmentation; it labels each pixel with a semantic class **and** each distinct instance of an object

Segmentation tasks are helpful in self-driving vehicles to create a pixel-level map of the world around them so they can navigate safely around pedestrians and other vehicles. It is also useful for medical imaging, where the task's finer granularity can help identify abnormal cells or organ features. Image segmentation can also be used in ecommerce to virtually try on clothes or create augmented reality experiences by overlaying objects in the real world through your camera.

```py
>>> from transformers import pipeline

>>> segmenter = pipeline(task="image-segmentation")
>>> preds = segmenter(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> print(*preds, sep="\n")
{'score': 0.9879, 'label': 'LABEL_184'}
{'score': 0.9973, 'label': 'snow'}
{'score': 0.9972, 'label': 'cat'}
```

### Depth estimation

Depth estimation predicts the distance of each pixel in an image from the camera. This computer vision task is especially important for scene understanding and reconstruction. For example, in self-driving cars, vehicles need to understand how far objects like pedestrians, traffic signs, and other vehicles are to avoid obstacles and collisions. Depth information is also helpful for constructing 3D representations from 2D images and can be used to create high-quality 3D representations of biological structures or buildings.

There are two approaches to depth estimation:

* stereo: depths are estimated by comparing two images of the same scene taken from slightly different angles
* monocular: depths are estimated from a single image

```py
>>> from transformers import pipeline

>>> depth_estimator = pipeline(task="depth-estimation")
>>> preds = depth_estimator(
...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
```

## Natural language processing

NLP tasks are among the most common types of tasks because text is such a natural way for us to communicate. To get text into a format recognized by a model, it needs to be tokenized. This means dividing a sequence of text into separate words or subwords (tokens) and then converting these tokens into numbers. As a result, you can represent a sequence of text as a sequence of numbers, and once you have a sequence of numbers, it can be input into a model to solve all sorts of NLP tasks!

### Text classification

Like classification tasks in any modality, text classification labels a sequence of text (it can be sentence-level, a paragraph, or a document) from a predefined set of classes. There are many practical applications for text classification, some of which include:

* sentiment analysis: label text according to some polarity like `positive` or `negative` which can inform and support decision-making in fields like politics, finance, and marketing
* content classification: label text according to some topic to help organize and filter information in news and social media feeds (`weather`, `sports`, `finance`, etc.)

```py
>>> from transformers import pipeline

>>> classifier = pipeline(task="sentiment-analysis")
>>> preds = classifier("Hugging Face is the best thing since sliced bread!")
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.9991, 'label': 'POSITIVE'}]
```

### Token classification

In any NLP task, text is preprocessed by separating the sequence of text into individual words or subwords. These are known as [tokens](glossary#token). Token classification assigns each token a label from a predefined set of classes.

Two common types of token classification are:

* named entity recognition (NER): label a token according to an entity category like organization, person, location or date. NER is especially popular in biomedical settings, where it can label genes, proteins, and drug names.
* part-of-speech tagging (POS): label a token according to its part-of-speech like noun, verb, or adjective. POS is useful for helping translation systems understand how two identical words are grammatically different (bank as a noun versus bank as a verb).
```py >>> from transformers import pipeline >>> classifier = pipeline(task="ner") >>> preds = classifier("Hugging Face is a French company based in New York City.") >>> preds = [ ... { ... "entity": pred["entity"], ... "score": round(pred["score"], 4), ... "index": pred["index"], ... "word": pred["word"], ... "start": pred["start"], ... "end": pred["end"], ... } ... for pred in preds ... ] >>> print(*preds, sep="\n") {'entity': 'I-ORG', 'score': 0.9968, 'index': 1, 'word': 'Hu', 'start': 0, 'end': 2} {'entity': 'I-ORG', 'score': 0.9293, 'index': 2, 'word': '##gging', 'start': 2, 'end': 7} {'entity': 'I-ORG', 'score': 0.9763, 'index': 3, 'word': 'Face', 'start': 8, 'end': 12} {'entity': 'I-MISC', 'score': 0.9983, 'index': 6, 'word': 'French', 'start': 18, 'end': 24} {'entity': 'I-LOC', 'score': 0.999, 'index': 10, 'word': 'New', 'start': 42, 'end': 45} {'entity': 'I-LOC', 'score': 0.9987, 'index': 11, 'word': 'York', 'start': 46, 'end': 50} {'entity': 'I-LOC', 'score': 0.9992, 'index': 12, 'word': 'City', 'start': 51, 'end': 55} ``` ### Question answering Question answering is another token-level task that returns an answer to a question, sometimes with context (open-domain) and other times without context (closed-domain). This task happens whenever we ask a virtual assistant something like whether a restaurant is open. It can also provide customer or technical support and help search engines retrieve the relevant information you're asking for. There are two common types of question answering: * extractive: given a question and some context, the answer is a span of text from the context the model must extract * abstractive: given a question and some context, the answer is generated from the context; this approach is handled by the [`Text2TextGenerationPipeline`] instead of the [`QuestionAnsweringPipeline`] shown below ```py >>> from transformers import pipeline >>> question_answerer = pipeline(task="question-answering") >>> preds = question_answerer( ... question="What is the name of the repository?", ... context="The name of the repository is huggingface/transformers", ... ) >>> print( ... f"score: {round(preds['score'], 4)}, start: {preds['start']}, end: {preds['end']}, answer: {preds['answer']}" ... ) score: 0.9327, start: 30, end: 54, answer: huggingface/transformers ``` ### Summarization Summarization creates a shorter version of a text from a longer one while trying to preserve most of the meaning of the original document. Summarization is a sequence-to-sequence task; it outputs a shorter text sequence than the input. There are a lot of long-form documents that can be summarized to help readers quickly understand the main points. Legislative bills, legal and financial documents, patents, and scientific papers are a few examples of documents that could be summarized to save readers time and serve as a reading aid. Like question answering, there are two types of summarization: * extractive: identify and extract the most important sentences from the original text * abstractive: generate the target summary (which may include new words not in the input document) from the original text; the [`SummarizationPipeline`] uses the abstractive approach ```py >>> from transformers import pipeline >>> summarizer = pipeline(task="summarization") >>> summarizer( ... "In this work, we presented the Transformer, the first sequence transduction model based entirely on attention, replacing the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention. 
For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers. On both WMT 2014 English-to-German and WMT 2014 English-to-French translation tasks, we achieve a new state of the art. In the former task our best model outperforms even all previously reported ensembles."
... )
[{'summary_text': ' The Transformer is the first sequence transduction model based entirely on attention . It replaces the recurrent layers most commonly used in encoder-decoder architectures with multi-headed self-attention . For translation tasks, the Transformer can be trained significantly faster than architectures based on recurrent or convolutional layers .'}]
```

### Translation

Translation converts a sequence of text in one language to another. It is important for helping people from different backgrounds communicate with each other, for translating content to reach wider audiences, and it can even serve as a learning tool to help people learn a new language. Along with summarization, translation is a sequence-to-sequence task, meaning the model receives an input sequence and returns a target output sequence.

In the early days, translation models were mostly limited to a single pair of languages, but recently, there has been increasing interest in multilingual models that can translate between many pairs of languages.

```py
>>> from transformers import pipeline

>>> text = "translate English to French: Hugging Face is a community-based open-source platform for machine learning."
>>> translator = pipeline(task="translation", model="google-t5/t5-small")
>>> translator(text)
[{'translation_text': "Hugging Face est une tribune communautaire de l'apprentissage des machines."}]
```

### Language modeling

Language modeling is a task that predicts a word in a sequence of text. It has become a very popular NLP task because a pretrained language model can be finetuned for many other downstream tasks. Lately, there has been a lot of interest in large language models (LLMs) which demonstrate zero- or few-shot learning. This means the model can solve tasks it wasn't explicitly trained to do! Language models can be used to generate fluent and convincing text, though you need to be careful since the text may not always be accurate.

There are two types of language modeling:

* causal: the model's objective is to predict the next token in a sequence, and future tokens are masked

    ```py
    >>> from transformers import pipeline

    >>> prompt = "Hugging Face is a community-based open-source platform for machine learning."
    >>> generator = pipeline(task="text-generation")
    >>> generator(prompt)  # doctest: +SKIP
    ```

* masked: the model's objective is to predict a masked token in a sequence with full access to the tokens in the sequence

    ```py
    >>> text = "Hugging Face is a community-based open-source <mask> for machine learning."
    >>> fill_mask = pipeline(task="fill-mask")
    >>> preds = fill_mask(text, top_k=1)
    >>> preds = [
    ...     {
    ...         "score": round(pred["score"], 4),
    ...         "token": pred["token"],
    ...         "token_str": pred["token_str"],
    ...         "sequence": pred["sequence"],
    ...     }
    ...     for pred in preds
    ... ]
    >>> preds
    [{'score': 0.2236,
      'token': 1761,
      'token_str': ' platform',
      'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}]
    ```

## Multimodal

Multimodal tasks require a model to process multiple data modalities (text, image, audio, video) to solve a particular problem.
Image captioning is an example of a multimodal task where the model takes an image as input and outputs a sequence of text describing the image or some properties of the image.

Although multimodal models work with different data types or modalities, internally, the preprocessing steps help the model convert all the data types into embeddings (vectors or lists of numbers that hold meaningful information about the data). For a task like image captioning, the model learns relationships between image embeddings and text embeddings.

### Document question answering

Document question answering is a task that answers natural language questions from a document. Unlike a token-level question answering task which takes text as input, document question answering takes an image of a document as input along with a question about the document and returns an answer. Document question answering can be used to parse structured documents and extract key information from them. In the example below, the total amount and change due can be extracted from a receipt.

```py
>>> from transformers import pipeline
>>> from PIL import Image
>>> import requests

>>> url = "https://huggingface.co/datasets/hf-internal-testing/example-documents/resolve/main/jpeg_images/2.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> doc_question_answerer = pipeline("document-question-answering", model="magorshunov/layoutlm-invoices")
>>> preds = doc_question_answerer(
...     question="What is the total amount?",
...     image=image,
... )
>>> preds
[{'score': 0.8531, 'answer': '17,000', 'start': 4, 'end': 4}]
```

Hopefully, this page has given you some more background information about all the types of tasks in each modality and the practical importance of each one. In the next [section](tasks_explained), you'll learn **how** 🤗 Transformers work to solve these tasks.
transformers/docs/source/en/task_summary.md/0
{ "file_path": "transformers/docs/source/en/task_summary.md", "repo_id": "transformers", "token_count": 5673 }
285
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Object detection [[open-in-colab]] Object detection is the computer vision task of detecting instances (such as humans, buildings, or cars) in an image. Object detection models receive an image as input and output coordinates of the bounding boxes and associated labels of the detected objects. An image can contain multiple objects, each with its own bounding box and a label (e.g. it can have a car and a building), and each object can be present in different parts of an image (e.g. the image can have several cars). This task is commonly used in autonomous driving for detecting things like pedestrians, road signs, and traffic lights. Other applications include counting objects in images, image search, and more. In this guide, you will learn how to: 1. Finetune [DETR](https://huggingface.co/docs/transformers/model_doc/detr), a model that combines a convolutional backbone with an encoder-decoder Transformer, on the [CPPE-5](https://huggingface.co/datasets/cppe-5) dataset. 2. Use your finetuned model for inference. <Tip> To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/object-detection) </Tip> Before you begin, make sure you have all the necessary libraries installed: ```bash pip install -q datasets transformers accelerate timm pip install -q -U albumentations>=1.4.5 torchmetrics pycocotools ``` You'll use 🤗 Datasets to load a dataset from the Hugging Face Hub, 🤗 Transformers to train your model, and `albumentations` to augment the data. We encourage you to share your model with the community. Log in to your Hugging Face account to upload it to the Hub. When prompted, enter your token to log in: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` To get started, we'll define global constants, namely the model name and image size. For this tutorial, we'll use the conditional DETR model due to its faster convergence. Feel free to select any object detection model available in the `transformers` library. ```py >>> MODEL_NAME = "microsoft/conditional-detr-resnet-50" # or "facebook/detr-resnet-50" >>> IMAGE_SIZE = 480 ``` ## Load the CPPE-5 dataset The [CPPE-5 dataset](https://huggingface.co/datasets/cppe-5) contains images with annotations identifying medical personal protective equipment (PPE) in the context of the COVID-19 pandemic. Start by loading the dataset and creating a `validation` split from `train`: ```py >>> from datasets import load_dataset >>> cppe5 = load_dataset("cppe-5") >>> if "validation" not in cppe5: ... split = cppe5["train"].train_test_split(0.15, seed=1337) ... cppe5["train"] = split["train"] ... 
cppe5["validation"] = split["test"]

>>> cppe5
DatasetDict({
    train: Dataset({
        features: ['image_id', 'image', 'width', 'height', 'objects'],
        num_rows: 850
    })
    test: Dataset({
        features: ['image_id', 'image', 'width', 'height', 'objects'],
        num_rows: 29
    })
    validation: Dataset({
        features: ['image_id', 'image', 'width', 'height', 'objects'],
        num_rows: 150
    })
})
```

You'll see that this dataset now has 850 training images, 150 validation images, and a test set with 29 images.

To get familiar with the data, explore what the examples look like.

```py
>>> cppe5["train"][0]
{
  'image_id': 366,
  'image': <PIL.PngImagePlugin.PngImageFile image mode=RGBA size=500x290>,
  'width': 500,
  'height': 500,
  'objects': {
    'id': [1932, 1933, 1934],
    'area': [27063, 34200, 32431],
    'bbox': [[29.0, 11.0, 97.0, 279.0], [201.0, 1.0, 120.0, 285.0], [382.0, 0.0, 113.0, 287.0]],
    'category': [0, 0, 0]
  }
}
```

The examples in the dataset have the following fields:
- `image_id`: the example image id
- `image`: a `PIL.Image.Image` object containing the image
- `width`: width of the image
- `height`: height of the image
- `objects`: a dictionary containing bounding box metadata for the objects in the image:
  - `id`: the annotation id
  - `area`: the area of the bounding box
  - `bbox`: the object's bounding box (in the [COCO format](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco))
  - `category`: the object's category, with possible values including `Coverall (0)`, `Face_Shield (1)`, `Gloves (2)`, `Goggles (3)` and `Mask (4)`

You may notice that the `bbox` field follows the COCO format, which is the format that the DETR model expects. However, the grouping of the fields inside `objects` differs from the annotation format DETR requires. You will need to apply some preprocessing transformations before using this data for training.

To get an even better understanding of the data, visualize an example in the dataset.

```py
>>> import numpy as np
>>> import os
>>> from PIL import Image, ImageDraw

>>> image = cppe5["train"][2]["image"]
>>> annotations = cppe5["train"][2]["objects"]
>>> draw = ImageDraw.Draw(image)
>>> width, height = image.size

>>> categories = cppe5["train"].features["objects"].feature["category"].names

>>> id2label = {index: x for index, x in enumerate(categories, start=0)}
>>> label2id = {v: k for k, v in id2label.items()}

>>> for i in range(len(annotations["id"])):
...     box = annotations["bbox"][i]
...     class_idx = annotations["category"][i]
...     x, y, w, h = tuple(box)
...     # Check if coordinates are normalized or not
...     if max(box) > 1.0:
...         # Coordinates are un-normalized, no need to re-scale them
...         x1, y1 = int(x), int(y)
...         x2, y2 = int(x + w), int(y + h)
...     else:
...         # Coordinates are normalized, re-scale them
...         x1 = int(x * width)
...         y1 = int(y * height)
...         x2 = int((x + w) * width)
...         y2 = int((y + h) * height)
...     draw.rectangle((x1, y1, x2, y2), outline="red", width=1)
...     draw.text((x1, y1), id2label[class_idx], fill="white")

>>> image
```

<div class="flex justify-center">
    <img src="https://i.imgur.com/oVQb9SF.png" alt="CPPE-5 Image Example"/>
</div>

To visualize the bounding boxes with associated labels, you can get the labels from the dataset's metadata, specifically the `category` field. You'll also want to create dictionaries that map a label id to a label class (`id2label`) and the other way around (`label2id`). You can use them later when setting up the model. Including these maps will make your model reusable by others if you share it on the Hugging Face Hub.
Note that the part of the above code that draws the bounding boxes assumes the boxes are in the `COCO` format `(x_min, y_min, width, height)`. It would have to be adjusted to work for other formats like `(x_min, y_min, x_max, y_max)`.

As a final step of getting familiar with the data, explore it for potential issues. One common problem with datasets for object detection is bounding boxes that "stretch" beyond the edge of the image. Such "runaway" bounding boxes can raise errors during training and should be addressed. There are a few examples with this issue in this dataset. To keep things simple in this guide, we will set `clip=True` for `BboxParams` in the transformations below.

## Preprocess the data

To finetune a model, you must preprocess the data you plan to use to precisely match the approach used for the pre-trained model. [`AutoImageProcessor`] takes care of processing image data to create `pixel_values`, `pixel_mask`, and `labels` that a DETR model can train with. The image processor has some attributes that you won't have to worry about:

- `image_mean = [0.485, 0.456, 0.406]`
- `image_std = [0.229, 0.224, 0.225]`

These are the mean and standard deviation used to normalize images during the model pre-training. These values are crucial to replicate when doing inference or finetuning a pre-trained image model.

Instantiate the image processor from the same checkpoint as the model you want to finetune.

```py
>>> from transformers import AutoImageProcessor

>>> MAX_SIZE = IMAGE_SIZE

>>> image_processor = AutoImageProcessor.from_pretrained(
...     MODEL_NAME,
...     do_resize=True,
...     size={"max_height": MAX_SIZE, "max_width": MAX_SIZE},
...     do_pad=True,
...     pad_size={"height": MAX_SIZE, "width": MAX_SIZE},
... )
```

Before passing the images to the `image_processor`, apply two preprocessing transformations to the dataset:

- Augmenting images
- Reformatting annotations to meet DETR expectations

First, to make sure the model does not overfit on the training data, you can apply image augmentation with any data augmentation library. Here we use [Albumentations](https://albumentations.ai/docs/). This library ensures that transformations affect the image and update the bounding boxes accordingly. The 🤗 Datasets library documentation has a detailed [guide on how to augment images for object detection](https://huggingface.co/docs/datasets/object_detection), and it uses the exact same dataset as an example. Apply some geometric and color transformations to the image. For additional augmentation options, explore the [Albumentations Demo Space](https://huggingface.co/spaces/qubvel-hf/albumentations-demo).

```py
>>> import albumentations as A

>>> train_augment_and_transform = A.Compose(
...     [
...         A.Perspective(p=0.1),
...         A.HorizontalFlip(p=0.5),
...         A.RandomBrightnessContrast(p=0.5),
...         A.HueSaturationValue(p=0.1),
...     ],
...     bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25),
... )

>>> validation_transform = A.Compose(
...     [A.NoOp()],
...     bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True),
... )
```

The `image_processor` expects the annotations to be in the following format: `{'image_id': int, 'annotations': List[Dict]}`, where each dictionary is a COCO object annotation. Let's add a function to reformat annotations for a single example:

```py
>>> def format_image_annotations_as_coco(image_id, categories, areas, bboxes):
...     """Format one set of image annotations to the COCO format
...
...     Args:
...         image_id (str): image id. e.g. "0001"
...         categories (List[int]): list of categories/class labels corresponding to provided bounding boxes
...         areas (List[float]): list of corresponding areas to provided bounding boxes
...         bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format
...             ([x_min, y_min, width, height] in absolute coordinates)
...
...     Returns:
...         dict: {
...             "image_id": image id,
...             "annotations": list of formatted annotations
...         }
...     """
...     annotations = []
...     for category, area, bbox in zip(categories, areas, bboxes):
...         formatted_annotation = {
...             "image_id": image_id,
...             "category_id": category,
...             "iscrowd": 0,
...             "area": area,
...             "bbox": list(bbox),
...         }
...         annotations.append(formatted_annotation)
...
...     return {
...         "image_id": image_id,
...         "annotations": annotations,
...     }
```

Now you can combine the image and annotation transformations to use on a batch of examples:

```py
>>> def augment_and_transform_batch(examples, transform, image_processor, return_pixel_mask=False):
...     """Apply augmentations and format annotations in COCO format for object detection task"""
...
...     images = []
...     annotations = []
...     for image_id, image, objects in zip(examples["image_id"], examples["image"], examples["objects"]):
...         image = np.array(image.convert("RGB"))
...
...         # apply augmentations
...         output = transform(image=image, bboxes=objects["bbox"], category=objects["category"])
...         images.append(output["image"])
...
...         # format annotations in COCO format
...         formatted_annotations = format_image_annotations_as_coco(
...             image_id, output["category"], objects["area"], output["bboxes"]
...         )
...         annotations.append(formatted_annotations)
...
...     # Apply the image processor transformations: resizing, rescaling, normalization
...     result = image_processor(images=images, annotations=annotations, return_tensors="pt")
...
...     if not return_pixel_mask:
...         result.pop("pixel_mask", None)
...
...     return result
```

Apply this preprocessing function to the entire dataset using the 🤗 Datasets [`~datasets.Dataset.with_transform`] method. This method applies transformations on the fly when you load an element of the dataset.

At this point, you can check what an example from the dataset looks like after the transformations. You should see a tensor with `pixel_values`, a tensor with `pixel_mask`, and `labels`.

```py
>>> from functools import partial

>>> # Make transform functions for batch and apply for dataset splits
>>> train_transform_batch = partial(
...     augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor
... )
>>> validation_transform_batch = partial(
...     augment_and_transform_batch, transform=validation_transform, image_processor=image_processor
...
) >>> cppe5["train"] = cppe5["train"].with_transform(train_transform_batch) >>> cppe5["validation"] = cppe5["validation"].with_transform(validation_transform_batch) >>> cppe5["test"] = cppe5["test"].with_transform(validation_transform_batch) >>> cppe5["train"][15] {'pixel_values': tensor([[[ 1.9235, 1.9407, 1.9749, ..., -0.7822, -0.7479, -0.6965], [ 1.9578, 1.9749, 1.9920, ..., -0.7993, -0.7650, -0.7308], [ 2.0092, 2.0092, 2.0263, ..., -0.8507, -0.8164, -0.7822], ..., [ 0.0741, 0.0741, 0.0741, ..., 0.0741, 0.0741, 0.0741], [ 0.0741, 0.0741, 0.0741, ..., 0.0741, 0.0741, 0.0741], [ 0.0741, 0.0741, 0.0741, ..., 0.0741, 0.0741, 0.0741]], [[ 1.6232, 1.6408, 1.6583, ..., 0.8704, 1.0105, 1.1331], [ 1.6408, 1.6583, 1.6758, ..., 0.8529, 0.9930, 1.0980], [ 1.6933, 1.6933, 1.7108, ..., 0.8179, 0.9580, 1.0630], ..., [ 0.2052, 0.2052, 0.2052, ..., 0.2052, 0.2052, 0.2052], [ 0.2052, 0.2052, 0.2052, ..., 0.2052, 0.2052, 0.2052], [ 0.2052, 0.2052, 0.2052, ..., 0.2052, 0.2052, 0.2052]], [[ 1.8905, 1.9080, 1.9428, ..., -0.1487, -0.0964, -0.0615], [ 1.9254, 1.9428, 1.9603, ..., -0.1661, -0.1138, -0.0790], [ 1.9777, 1.9777, 1.9951, ..., -0.2010, -0.1138, -0.0790], ..., [ 0.4265, 0.4265, 0.4265, ..., 0.4265, 0.4265, 0.4265], [ 0.4265, 0.4265, 0.4265, ..., 0.4265, 0.4265, 0.4265], [ 0.4265, 0.4265, 0.4265, ..., 0.4265, 0.4265, 0.4265]]]), 'labels': {'image_id': tensor([688]), 'class_labels': tensor([3, 4, 2, 0, 0]), 'boxes': tensor([[0.4700, 0.1933, 0.1467, 0.0767], [0.4858, 0.2600, 0.1150, 0.1000], [0.4042, 0.4517, 0.1217, 0.1300], [0.4242, 0.3217, 0.3617, 0.5567], [0.6617, 0.4033, 0.5400, 0.4533]]), 'area': tensor([ 4048., 4140., 5694., 72478., 88128.]), 'iscrowd': tensor([0, 0, 0, 0, 0]), 'orig_size': tensor([480, 480])}} ``` You have successfully augmented the individual images and prepared their annotations. However, preprocessing isn't complete yet. In the final step, create a custom `collate_fn` to batch images together. Pad images (which are now `pixel_values`) to the largest image in a batch, and create a corresponding `pixel_mask` to indicate which pixels are real (1) and which are padding (0). ```py >>> import torch >>> def collate_fn(batch): ... data = {} ... data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch]) ... data["labels"] = [x["labels"] for x in batch] ... if "pixel_mask" in batch[0]: ... data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch]) ... return data ``` ## Preparing function to compute mAP Object detection models are commonly evaluated with a set of <a href="https://cocodataset.org/#detection-eval">COCO-style metrics</a>. We are going to use `torchmetrics` to compute `mAP` (mean average precision) and `mAR` (mean average recall) metrics and will wrap it to `compute_metrics` function in order to use in [`Trainer`] for evaluation. Intermediate format of boxes used for training is `YOLO` (normalized) but we will compute metrics for boxes in `Pascal VOC` (absolute) format in order to correctly handle box areas. Let's define a function that converts bounding boxes to `Pascal VOC` format: ```py >>> from transformers.image_transforms import center_to_corners_format >>> def convert_bbox_yolo_to_pascal(boxes, image_size): ... """ ... Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1] ... to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates. ... Args: ... boxes (torch.Tensor): Bounding boxes in YOLO format ... image_size (Tuple[int, int]): Image size in format (height, width) ... Returns: ... 
torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max)
...     """
...     # convert center to corners format
...     boxes = center_to_corners_format(boxes)
...
...     # convert to absolute coordinates
...     height, width = image_size
...     boxes = boxes * torch.tensor([[width, height, width, height]])
...
...     return boxes
```

Then, in the `compute_metrics` function, we collect `predicted` and `target` bounding boxes, scores, and labels from the evaluation loop results and pass them to the scoring function.

```py
>>> import numpy as np
>>> from dataclasses import dataclass
>>> from torchmetrics.detection.mean_ap import MeanAveragePrecision


>>> @dataclass
>>> class ModelOutput:
...     logits: torch.Tensor
...     pred_boxes: torch.Tensor


>>> @torch.no_grad()
>>> def compute_metrics(evaluation_results, image_processor, threshold=0.0, id2label=None):
...     """
...     Compute mean average precision (mAP), mean average recall (mAR) and their variants for the object detection task.
...
...     Args:
...         evaluation_results (EvalPrediction): Predictions and targets from evaluation.
...         threshold (float, optional): Threshold to filter predicted boxes by confidence. Defaults to 0.0.
...         id2label (Optional[dict], optional): Mapping from class id to class name. Defaults to None.
...
...     Returns:
...         Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>}
...     """
...
...     predictions, targets = evaluation_results.predictions, evaluation_results.label_ids
...
...     # For metric computation we need to provide:
...     #  - targets in a form of list of dictionaries with keys "boxes", "labels"
...     #  - predictions in a form of list of dictionaries with keys "boxes", "scores", "labels"
...
...     image_sizes = []
...     post_processed_targets = []
...     post_processed_predictions = []
...
...     # Collect targets in the required format for metric computation
...     for batch in targets:
...         # collect image sizes, we will need them for predictions post processing
...         batch_image_sizes = torch.tensor(np.array([x["orig_size"] for x in batch]))
...         image_sizes.append(batch_image_sizes)
...         # collect targets in the required format for metric computation
...         # boxes were converted to YOLO format needed for model training
...         # here we will convert them to Pascal VOC format (x_min, y_min, x_max, y_max)
...         for image_target in batch:
...             boxes = torch.tensor(image_target["boxes"])
...             boxes = convert_bbox_yolo_to_pascal(boxes, image_target["orig_size"])
...             labels = torch.tensor(image_target["class_labels"])
...             post_processed_targets.append({"boxes": boxes, "labels": labels})
...
...     # Collect predictions in the required format for metric computation,
...     # model produce boxes in YOLO format, then image_processor convert them to Pascal VOC format
...     for batch, target_sizes in zip(predictions, image_sizes):
...         batch_logits, batch_boxes = batch[1], batch[2]
...         output = ModelOutput(logits=torch.tensor(batch_logits), pred_boxes=torch.tensor(batch_boxes))
...         post_processed_output = image_processor.post_process_object_detection(
...             output, threshold=threshold, target_sizes=target_sizes
...         )
...         post_processed_predictions.extend(post_processed_output)
...
...     # Compute metrics
...     metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True)
...     metric.update(post_processed_predictions, post_processed_targets)
...     metrics = metric.compute()
...
...     # Replace list of per class metrics with separate metric for each class
...     classes = metrics.pop("classes")
...     map_per_class = metrics.pop("map_per_class")
...     mar_100_per_class = metrics.pop("mar_100_per_class")
...     for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class):
...         class_name = id2label[class_id.item()] if id2label is not None else class_id.item()
...         metrics[f"map_{class_name}"] = class_map
...         metrics[f"mar_100_{class_name}"] = class_mar
...
...     metrics = {k: round(v.item(), 4) for k, v in metrics.items()}
...
...     return metrics


>>> eval_compute_metrics_fn = partial(
...     compute_metrics, image_processor=image_processor, id2label=id2label, threshold=0.0
... )
```

## Training the detection model

You have done most of the heavy lifting in the previous sections, so now you are ready to train your model! The images in this dataset are still quite large, even after resizing. This means that finetuning this model will require at least one GPU.

Training involves the following steps:
1. Load the model with [`AutoModelForObjectDetection`] using the same checkpoint as in the preprocessing.
2. Define your training hyperparameters in [`TrainingArguments`].
3. Pass the training arguments to [`Trainer`] along with the model, dataset, image processor, and data collator.
4. Call [`~Trainer.train`] to finetune your model.

When loading the model from the same checkpoint that you used for the preprocessing, remember to pass the `label2id` and `id2label` maps that you created earlier from the dataset's metadata. Additionally, we specify `ignore_mismatched_sizes=True` to replace the existing classification head with a new one.

```py
>>> from transformers import AutoModelForObjectDetection

>>> model = AutoModelForObjectDetection.from_pretrained(
...     MODEL_NAME,
...     id2label=id2label,
...     label2id=label2id,
...     ignore_mismatched_sizes=True,
... )
```

In the [`TrainingArguments`] use `output_dir` to specify where to save your model, then configure hyperparameters as you see fit. With `num_train_epochs=30`, training takes about 35 minutes on a Google Colab T4 GPU; increase the number of epochs to get better results.

Important notes:
- Do not remove unused columns because this will drop the image column. Without the image column, you can't create `pixel_values`. For this reason, set `remove_unused_columns` to `False`.
- Set `eval_do_concat_batches=False` to get proper evaluation results. Images have different numbers of target boxes; if batches were concatenated, we would not be able to determine which boxes belong to a particular image.

If you wish to share your model by pushing to the Hub, set `push_to_hub` to `True` (you must be signed in to Hugging Face to upload your model).

```py
>>> from transformers import TrainingArguments

>>> training_args = TrainingArguments(
...     output_dir="detr_finetuned_cppe5",
...     num_train_epochs=30,
...     fp16=False,
...     per_device_train_batch_size=8,
...     dataloader_num_workers=4,
...     learning_rate=5e-5,
...     lr_scheduler_type="cosine",
...     weight_decay=1e-4,
...     max_grad_norm=0.01,
...     metric_for_best_model="eval_map",
...     greater_is_better=True,
...     load_best_model_at_end=True,
...     eval_strategy="epoch",
...     save_strategy="epoch",
...     save_total_limit=2,
...     remove_unused_columns=False,
...     eval_do_concat_batches=False,
...     push_to_hub=True,
... )
```

Finally, bring everything together, and call [`~transformers.Trainer.train`]:

```py
>>> from transformers import Trainer

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=cppe5["train"],
...     eval_dataset=cppe5["validation"],
...     tokenizer=image_processor,
...     data_collator=collate_fn,
...     compute_metrics=eval_compute_metrics_fn,
...
) >>> trainer.train() ``` <div> <progress value='3210' max='3210' style='width:300px; height:20px; vertical-align: middle;'></progress> [3210/3210 26:07, Epoch 30/30] </div> <table border="1" class="dataframe"> <thead> <tr style="text-align: left;"> <th>Epoch</th> <th>Training Loss</th> <th>Validation Loss</th> <th>Map</th> <th>Map 50</th> <th>Map 75</th> <th>Map Small</th> <th>Map Medium</th> <th>Map Large</th> <th>Mar 1</th> <th>Mar 10</th> <th>Mar 100</th> <th>Mar Small</th> <th>Mar Medium</th> <th>Mar Large</th> <th>Map Coverall</th> <th>Mar 100 Coverall</th> <th>Map Face Shield</th> <th>Mar 100 Face Shield</th> <th>Map Gloves</th> <th>Mar 100 Gloves</th> <th>Map Goggles</th> <th>Mar 100 Goggles</th> <th>Map Mask</th> <th>Mar 100 Mask</th> </tr> </thead> <tbody> <tr> <td>1</td> <td>No log</td> <td>2.629903</td> <td>0.008900</td> <td>0.023200</td> <td>0.006500</td> <td>0.001300</td> <td>0.002800</td> <td>0.020500</td> <td>0.021500</td> <td>0.070400</td> <td>0.101400</td> <td>0.007600</td> <td>0.106200</td> <td>0.096100</td> <td>0.036700</td> <td>0.232000</td> <td>0.000300</td> <td>0.019000</td> <td>0.003900</td> <td>0.125400</td> <td>0.000100</td> <td>0.003100</td> <td>0.003500</td> <td>0.127600</td> </tr> <tr> <td>2</td> <td>No log</td> <td>3.479864</td> <td>0.014800</td> <td>0.034600</td> <td>0.010800</td> <td>0.008600</td> <td>0.011700</td> <td>0.012500</td> <td>0.041100</td> <td>0.098700</td> <td>0.130000</td> <td>0.056000</td> <td>0.062200</td> <td>0.111900</td> <td>0.053500</td> <td>0.447300</td> <td>0.010600</td> <td>0.100000</td> <td>0.000200</td> <td>0.022800</td> <td>0.000100</td> <td>0.015400</td> <td>0.009700</td> <td>0.064400</td> </tr> <tr> <td>3</td> <td>No log</td> <td>2.107622</td> <td>0.041700</td> <td>0.094000</td> <td>0.034300</td> <td>0.024100</td> <td>0.026400</td> <td>0.047400</td> <td>0.091500</td> <td>0.182800</td> <td>0.225800</td> <td>0.087200</td> <td>0.199400</td> <td>0.210600</td> <td>0.150900</td> <td>0.571200</td> <td>0.017300</td> <td>0.101300</td> <td>0.007300</td> <td>0.180400</td> <td>0.002100</td> <td>0.026200</td> <td>0.031000</td> <td>0.250200</td> </tr> <tr> <td>4</td> <td>No log</td> <td>2.031242</td> <td>0.055900</td> <td>0.120600</td> <td>0.046900</td> <td>0.013800</td> <td>0.038100</td> <td>0.090300</td> <td>0.105900</td> <td>0.225600</td> <td>0.266100</td> <td>0.130200</td> <td>0.228100</td> <td>0.330000</td> <td>0.191000</td> <td>0.572100</td> <td>0.010600</td> <td>0.157000</td> <td>0.014600</td> <td>0.235300</td> <td>0.001700</td> <td>0.052300</td> <td>0.061800</td> <td>0.313800</td> </tr> <tr> <td>5</td> <td>3.889400</td> <td>1.883433</td> <td>0.089700</td> <td>0.201800</td> <td>0.067300</td> <td>0.022800</td> <td>0.065300</td> <td>0.129500</td> <td>0.136000</td> <td>0.272200</td> <td>0.303700</td> <td>0.112900</td> <td>0.312500</td> <td>0.424600</td> <td>0.300200</td> <td>0.585100</td> <td>0.032700</td> <td>0.202500</td> <td>0.031300</td> <td>0.271000</td> <td>0.008700</td> <td>0.126200</td> <td>0.075500</td> <td>0.333800</td> </tr> <tr> <td>6</td> <td>3.889400</td> <td>1.807503</td> <td>0.118500</td> <td>0.270900</td> <td>0.090200</td> <td>0.034900</td> <td>0.076700</td> <td>0.152500</td> <td>0.146100</td> <td>0.297800</td> <td>0.325400</td> <td>0.171700</td> <td>0.283700</td> <td>0.545900</td> <td>0.396900</td> <td>0.554500</td> <td>0.043000</td> <td>0.262000</td> <td>0.054500</td> <td>0.271900</td> <td>0.020300</td> <td>0.230800</td> <td>0.077600</td> <td>0.308000</td> </tr> <tr> <td>7</td> <td>3.889400</td> <td>1.716169</td> 
<td>0.143500</td> <td>0.307700</td> <td>0.123200</td> <td>0.045800</td> <td>0.097800</td> <td>0.258300</td> <td>0.165300</td> <td>0.327700</td> <td>0.352600</td> <td>0.140900</td> <td>0.336700</td> <td>0.599400</td> <td>0.442900</td> <td>0.620700</td> <td>0.069400</td> <td>0.301300</td> <td>0.081600</td> <td>0.292000</td> <td>0.011000</td> <td>0.230800</td> <td>0.112700</td> <td>0.318200</td> </tr> <tr> <td>8</td> <td>3.889400</td> <td>1.679014</td> <td>0.153000</td> <td>0.355800</td> <td>0.127900</td> <td>0.038700</td> <td>0.115600</td> <td>0.291600</td> <td>0.176000</td> <td>0.322500</td> <td>0.349700</td> <td>0.135600</td> <td>0.326100</td> <td>0.643700</td> <td>0.431700</td> <td>0.582900</td> <td>0.069800</td> <td>0.265800</td> <td>0.088600</td> <td>0.274600</td> <td>0.028300</td> <td>0.280000</td> <td>0.146700</td> <td>0.345300</td> </tr> <tr> <td>9</td> <td>3.889400</td> <td>1.618239</td> <td>0.172100</td> <td>0.375300</td> <td>0.137600</td> <td>0.046100</td> <td>0.141700</td> <td>0.308500</td> <td>0.194000</td> <td>0.356200</td> <td>0.386200</td> <td>0.162400</td> <td>0.359200</td> <td>0.677700</td> <td>0.469800</td> <td>0.623900</td> <td>0.102100</td> <td>0.317700</td> <td>0.099100</td> <td>0.290200</td> <td>0.029300</td> <td>0.335400</td> <td>0.160200</td> <td>0.364000</td> </tr> <tr> <td>10</td> <td>1.599700</td> <td>1.572512</td> <td>0.179500</td> <td>0.400400</td> <td>0.147200</td> <td>0.056500</td> <td>0.141700</td> <td>0.316700</td> <td>0.213100</td> <td>0.357600</td> <td>0.381300</td> <td>0.197900</td> <td>0.344300</td> <td>0.638500</td> <td>0.466900</td> <td>0.623900</td> <td>0.101300</td> <td>0.311400</td> <td>0.104700</td> <td>0.279500</td> <td>0.051600</td> <td>0.338500</td> <td>0.173000</td> <td>0.353300</td> </tr> <tr> <td>11</td> <td>1.599700</td> <td>1.528889</td> <td>0.192200</td> <td>0.415000</td> <td>0.160800</td> <td>0.053700</td> <td>0.150500</td> <td>0.378000</td> <td>0.211500</td> <td>0.371700</td> <td>0.397800</td> <td>0.204900</td> <td>0.374600</td> <td>0.684800</td> <td>0.491900</td> <td>0.632400</td> <td>0.131200</td> <td>0.346800</td> <td>0.122000</td> <td>0.300900</td> <td>0.038400</td> <td>0.344600</td> <td>0.177500</td> <td>0.364400</td> </tr> <tr> <td>12</td> <td>1.599700</td> <td>1.517532</td> <td>0.198300</td> <td>0.429800</td> <td>0.159800</td> <td>0.066400</td> <td>0.162900</td> <td>0.383300</td> <td>0.220700</td> <td>0.382100</td> <td>0.405400</td> <td>0.214800</td> <td>0.383200</td> <td>0.672900</td> <td>0.469000</td> <td>0.610400</td> <td>0.167800</td> <td>0.379700</td> <td>0.119700</td> <td>0.307100</td> <td>0.038100</td> <td>0.335400</td> <td>0.196800</td> <td>0.394200</td> </tr> <tr> <td>13</td> <td>1.599700</td> <td>1.488849</td> <td>0.209800</td> <td>0.452300</td> <td>0.172300</td> <td>0.094900</td> <td>0.171100</td> <td>0.437800</td> <td>0.222000</td> <td>0.379800</td> <td>0.411500</td> <td>0.203800</td> <td>0.397300</td> <td>0.707500</td> <td>0.470700</td> <td>0.620700</td> <td>0.186900</td> <td>0.407600</td> <td>0.124200</td> <td>0.306700</td> <td>0.059300</td> <td>0.355400</td> <td>0.207700</td> <td>0.367100</td> </tr> <tr> <td>14</td> <td>1.599700</td> <td>1.482210</td> <td>0.228900</td> <td>0.482600</td> <td>0.187800</td> <td>0.083600</td> <td>0.191800</td> <td>0.444100</td> <td>0.225900</td> <td>0.376900</td> <td>0.407400</td> <td>0.182500</td> <td>0.384800</td> <td>0.700600</td> <td>0.512100</td> <td>0.640100</td> <td>0.175000</td> <td>0.363300</td> <td>0.144300</td> <td>0.300000</td> <td>0.083100</td> <td>0.363100</td> 
<td>0.229900</td> <td>0.370700</td> </tr> <tr> <td>15</td> <td>1.326800</td> <td>1.475198</td> <td>0.216300</td> <td>0.455600</td> <td>0.174900</td> <td>0.088500</td> <td>0.183500</td> <td>0.424400</td> <td>0.226900</td> <td>0.373400</td> <td>0.404300</td> <td>0.199200</td> <td>0.396400</td> <td>0.677800</td> <td>0.496300</td> <td>0.633800</td> <td>0.166300</td> <td>0.392400</td> <td>0.128900</td> <td>0.312900</td> <td>0.085200</td> <td>0.312300</td> <td>0.205000</td> <td>0.370200</td> </tr> <tr> <td>16</td> <td>1.326800</td> <td>1.459697</td> <td>0.233200</td> <td>0.504200</td> <td>0.192200</td> <td>0.096000</td> <td>0.202000</td> <td>0.430800</td> <td>0.239100</td> <td>0.382400</td> <td>0.412600</td> <td>0.219500</td> <td>0.403100</td> <td>0.670400</td> <td>0.485200</td> <td>0.625200</td> <td>0.196500</td> <td>0.410100</td> <td>0.135700</td> <td>0.299600</td> <td>0.123100</td> <td>0.356900</td> <td>0.225300</td> <td>0.371100</td> </tr> <tr> <td>17</td> <td>1.326800</td> <td>1.407340</td> <td>0.243400</td> <td>0.511900</td> <td>0.204500</td> <td>0.121000</td> <td>0.215700</td> <td>0.468000</td> <td>0.246200</td> <td>0.394600</td> <td>0.424200</td> <td>0.225900</td> <td>0.416100</td> <td>0.705200</td> <td>0.494900</td> <td>0.638300</td> <td>0.224900</td> <td>0.430400</td> <td>0.157200</td> <td>0.317900</td> <td>0.115700</td> <td>0.369200</td> <td>0.224200</td> <td>0.365300</td> </tr> <tr> <td>18</td> <td>1.326800</td> <td>1.419522</td> <td>0.245100</td> <td>0.521500</td> <td>0.210000</td> <td>0.116100</td> <td>0.211500</td> <td>0.489900</td> <td>0.255400</td> <td>0.391600</td> <td>0.419700</td> <td>0.198800</td> <td>0.421200</td> <td>0.701400</td> <td>0.501800</td> <td>0.634200</td> <td>0.226700</td> <td>0.410100</td> <td>0.154400</td> <td>0.321400</td> <td>0.105900</td> <td>0.352300</td> <td>0.236700</td> <td>0.380400</td> </tr> <tr> <td>19</td> <td>1.158600</td> <td>1.398764</td> <td>0.253600</td> <td>0.519200</td> <td>0.213600</td> <td>0.135200</td> <td>0.207700</td> <td>0.491900</td> <td>0.257300</td> <td>0.397300</td> <td>0.428000</td> <td>0.241400</td> <td>0.401800</td> <td>0.703500</td> <td>0.509700</td> <td>0.631100</td> <td>0.236700</td> <td>0.441800</td> <td>0.155900</td> <td>0.330800</td> <td>0.128100</td> <td>0.352300</td> <td>0.237500</td> <td>0.384000</td> </tr> <tr> <td>20</td> <td>1.158600</td> <td>1.390591</td> <td>0.248800</td> <td>0.520200</td> <td>0.216600</td> <td>0.127500</td> <td>0.211400</td> <td>0.471900</td> <td>0.258300</td> <td>0.407000</td> <td>0.429100</td> <td>0.240300</td> <td>0.407600</td> <td>0.708500</td> <td>0.505800</td> <td>0.623400</td> <td>0.235500</td> <td>0.431600</td> <td>0.150000</td> <td>0.325000</td> <td>0.125700</td> <td>0.375400</td> <td>0.227200</td> <td>0.390200</td> </tr> <tr> <td>21</td> <td>1.158600</td> <td>1.360608</td> <td>0.262700</td> <td>0.544800</td> <td>0.222100</td> <td>0.134700</td> <td>0.230000</td> <td>0.487500</td> <td>0.269500</td> <td>0.413300</td> <td>0.436300</td> <td>0.236200</td> <td>0.419100</td> <td>0.709300</td> <td>0.514100</td> <td>0.637400</td> <td>0.257200</td> <td>0.450600</td> <td>0.165100</td> <td>0.338400</td> <td>0.139400</td> <td>0.372300</td> <td>0.237700</td> <td>0.382700</td> </tr> <tr> <td>22</td> <td>1.158600</td> <td>1.368296</td> <td>0.262800</td> <td>0.542400</td> <td>0.236400</td> <td>0.137400</td> <td>0.228100</td> <td>0.498500</td> <td>0.266500</td> <td>0.409000</td> <td>0.433000</td> <td>0.239900</td> <td>0.418500</td> <td>0.697500</td> <td>0.520500</td> <td>0.641000</td> <td>0.257500</td> 
<td>0.455700</td> <td>0.162600</td> <td>0.334800</td> <td>0.140200</td> <td>0.353800</td> <td>0.233200</td> <td>0.379600</td> </tr> <tr> <td>23</td> <td>1.158600</td> <td>1.368176</td> <td>0.264800</td> <td>0.541100</td> <td>0.233100</td> <td>0.138200</td> <td>0.223900</td> <td>0.498700</td> <td>0.272300</td> <td>0.407400</td> <td>0.434400</td> <td>0.233100</td> <td>0.418300</td> <td>0.702000</td> <td>0.524400</td> <td>0.642300</td> <td>0.262300</td> <td>0.444300</td> <td>0.159700</td> <td>0.335300</td> <td>0.140500</td> <td>0.366200</td> <td>0.236900</td> <td>0.384000</td> </tr> <tr> <td>24</td> <td>1.049700</td> <td>1.355271</td> <td>0.269700</td> <td>0.549200</td> <td>0.239100</td> <td>0.134700</td> <td>0.229900</td> <td>0.519200</td> <td>0.274800</td> <td>0.412700</td> <td>0.437600</td> <td>0.245400</td> <td>0.417200</td> <td>0.711200</td> <td>0.523200</td> <td>0.644100</td> <td>0.272100</td> <td>0.440500</td> <td>0.166700</td> <td>0.341500</td> <td>0.137700</td> <td>0.373800</td> <td>0.249000</td> <td>0.388000</td> </tr> <tr> <td>25</td> <td>1.049700</td> <td>1.355180</td> <td>0.272500</td> <td>0.547900</td> <td>0.243800</td> <td>0.149700</td> <td>0.229900</td> <td>0.523100</td> <td>0.272500</td> <td>0.415700</td> <td>0.442200</td> <td>0.256200</td> <td>0.420200</td> <td>0.705800</td> <td>0.523900</td> <td>0.639600</td> <td>0.271700</td> <td>0.451900</td> <td>0.166300</td> <td>0.346900</td> <td>0.153700</td> <td>0.383100</td> <td>0.247000</td> <td>0.389300</td> </tr> <tr> <td>26</td> <td>1.049700</td> <td>1.349337</td> <td>0.275600</td> <td>0.556300</td> <td>0.246400</td> <td>0.146700</td> <td>0.234800</td> <td>0.516300</td> <td>0.274200</td> <td>0.418300</td> <td>0.440900</td> <td>0.248700</td> <td>0.418900</td> <td>0.705800</td> <td>0.523200</td> <td>0.636500</td> <td>0.274700</td> <td>0.440500</td> <td>0.172400</td> <td>0.349100</td> <td>0.155600</td> <td>0.384600</td> <td>0.252300</td> <td>0.393800</td> </tr> <tr> <td>27</td> <td>1.049700</td> <td>1.350782</td> <td>0.275200</td> <td>0.548700</td> <td>0.246800</td> <td>0.147300</td> <td>0.236400</td> <td>0.527200</td> <td>0.280100</td> <td>0.416200</td> <td>0.442600</td> <td>0.253400</td> <td>0.424000</td> <td>0.710300</td> <td>0.526600</td> <td>0.640100</td> <td>0.273200</td> <td>0.445600</td> <td>0.167000</td> <td>0.346900</td> <td>0.160100</td> <td>0.387700</td> <td>0.249200</td> <td>0.392900</td> </tr> <tr> <td>28</td> <td>1.049700</td> <td>1.346533</td> <td>0.277000</td> <td>0.552800</td> <td>0.252900</td> <td>0.147400</td> <td>0.240000</td> <td>0.527600</td> <td>0.280900</td> <td>0.420900</td> <td>0.444100</td> <td>0.255500</td> <td>0.424500</td> <td>0.711200</td> <td>0.530200</td> <td>0.646800</td> <td>0.277400</td> <td>0.441800</td> <td>0.170900</td> <td>0.346900</td> <td>0.156600</td> <td>0.389200</td> <td>0.249600</td> <td>0.396000</td> </tr> <tr> <td>29</td> <td>0.993700</td> <td>1.346575</td> <td>0.277100</td> <td>0.554800</td> <td>0.252900</td> <td>0.148400</td> <td>0.239700</td> <td>0.523600</td> <td>0.278400</td> <td>0.420000</td> <td>0.443300</td> <td>0.256300</td> <td>0.424000</td> <td>0.705600</td> <td>0.529600</td> <td>0.647300</td> <td>0.273900</td> <td>0.439200</td> <td>0.174300</td> <td>0.348700</td> <td>0.157600</td> <td>0.386200</td> <td>0.250100</td> <td>0.395100</td> </tr> <tr> <td>30</td> <td>0.993700</td> <td>1.346446</td> <td>0.277400</td> <td>0.554700</td> <td>0.252700</td> <td>0.147900</td> <td>0.240800</td> <td>0.523600</td> <td>0.278800</td> <td>0.420400</td> <td>0.443300</td> <td>0.256100</td> 
<td>0.424200</td> <td>0.705500</td> <td>0.530100</td> <td>0.646800</td> <td>0.275600</td> <td>0.440500</td> <td>0.174500</td> <td>0.348700</td> <td>0.157300</td> <td>0.386200</td> <td>0.249200</td> <td>0.394200</td> </tr> </tbody> </table><p> If you have set `push_to_hub` to `True` in the `training_args`, the training checkpoints are pushed to the Hugging Face Hub. Upon training completion, push the final model to the Hub as well by calling the [`~transformers.Trainer.push_to_hub`] method. ```py >>> trainer.push_to_hub() ``` ## Evaluate ```py >>> from pprint import pprint >>> metrics = trainer.evaluate(eval_dataset=cppe5["test"], metric_key_prefix="test") >>> pprint(metrics) {'epoch': 30.0, 'test_loss': 1.0877351760864258, 'test_map': 0.4116, 'test_map_50': 0.741, 'test_map_75': 0.3663, 'test_map_Coverall': 0.5937, 'test_map_Face_Shield': 0.5863, 'test_map_Gloves': 0.3416, 'test_map_Goggles': 0.1468, 'test_map_Mask': 0.3894, 'test_map_large': 0.5637, 'test_map_medium': 0.3257, 'test_map_small': 0.3589, 'test_mar_1': 0.323, 'test_mar_10': 0.5237, 'test_mar_100': 0.5587, 'test_mar_100_Coverall': 0.6756, 'test_mar_100_Face_Shield': 0.7294, 'test_mar_100_Gloves': 0.4721, 'test_mar_100_Goggles': 0.4125, 'test_mar_100_Mask': 0.5038, 'test_mar_large': 0.7283, 'test_mar_medium': 0.4901, 'test_mar_small': 0.4469, 'test_runtime': 1.6526, 'test_samples_per_second': 17.548, 'test_steps_per_second': 2.42} ``` These results can be further improved by adjusting the hyperparameters in [`TrainingArguments`]. Give it a go! ## Inference Now that you have finetuned a model, evaluated it, and uploaded it to the Hugging Face Hub, you can use it for inference. ```py >>> import torch >>> import requests >>> from PIL import Image, ImageDraw >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> url = "https://images.pexels.com/photos/8413299/pexels-photo-8413299.jpeg?auto=compress&cs=tinysrgb&w=630&h=375&dpr=2" >>> image = Image.open(requests.get(url, stream=True).raw) ``` Load model and image processor from the Hugging Face Hub (skip to use already trained in this session): ```py >>> device = "cuda" >>> model_repo = "qubvel-hf/detr_finetuned_cppe5" >>> image_processor = AutoImageProcessor.from_pretrained(model_repo) >>> model = AutoModelForObjectDetection.from_pretrained(model_repo) >>> model = model.to(device) ``` And detect bounding boxes: ```py >>> with torch.no_grad(): ... inputs = image_processor(images=[image], return_tensors="pt") ... outputs = model(**inputs.to(device)) ... target_sizes = torch.tensor([[image.size[1], image.size[0]]]) ... results = image_processor.post_process_object_detection(outputs, threshold=0.3, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected Gloves with confidence 0.683 at location [244.58, 124.33, 300.35, 185.13] Detected Mask with confidence 0.517 at location [143.73, 64.58, 219.57, 125.89] Detected Gloves with confidence 0.425 at location [179.15, 155.57, 262.4, 226.35] Detected Coverall with confidence 0.407 at location [307.13, -1.18, 477.82, 318.06] Detected Coverall with confidence 0.391 at location [68.61, 126.66, 309.03, 318.89] ``` Let's plot the result: ```py >>> draw = ImageDraw.Draw(image) >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... 
box = [round(i, 2) for i in box.tolist()] ... x, y, x2, y2 = tuple(box) ... draw.rectangle((x, y, x2, y2), outline="red", width=1) ... draw.text((x, y), model.config.id2label[label.item()], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/oDUqD0K.png" alt="Object detection result on a new image"/> </div>
transformers/docs/source/en/tasks/object_detection.md/0
{ "file_path": "transformers/docs/source/en/tasks/object_detection.md", "repo_id": "transformers", "token_count": 23679 }
286
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # XLA Integration for TensorFlow Models [[open-in-colab]] Accelerated Linear Algebra, dubbed XLA, is a compiler for accelerating the runtime of TensorFlow Models. From the [official documentation](https://www.tensorflow.org/xla): XLA (Accelerated Linear Algebra) is a domain-specific compiler for linear algebra that can accelerate TensorFlow models with potentially no source code changes. Using XLA in TensorFlow is simple – it comes packaged inside the `tensorflow` library, and it can be triggered with the `jit_compile` argument in any graph-creating function such as [`tf.function`](https://www.tensorflow.org/guide/intro_to_graphs). When using Keras methods like `fit()` and `predict()`, you can enable XLA simply by passing the `jit_compile` argument to `model.compile()`. However, XLA is not limited to these methods - it can also be used to accelerate any arbitrary `tf.function`. Several TensorFlow methods in 🤗 Transformers have been rewritten to be XLA-compatible, including text generation for models such as [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2), [T5](https://huggingface.co/docs/transformers/model_doc/t5) and [OPT](https://huggingface.co/docs/transformers/model_doc/opt), as well as speech processing for models such as [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper). While the exact amount of speed-up is very much model-dependent, for TensorFlow text generation models inside 🤗 Transformers, we noticed a speed-up of ~100x. This document will explain how you can use XLA for these models to get the maximum amount of performance. We’ll also provide links to additional resources if you’re interested to learn more about the benchmarks and our design philosophy behind the XLA integration. ## Running TF functions with XLA Let us consider the following model in TensorFlow: ```py import tensorflow as tf model = tf.keras.Sequential( [tf.keras.layers.Dense(10, input_shape=(10,), activation="relu"), tf.keras.layers.Dense(5, activation="softmax")] ) ``` The above model accepts inputs having a dimension of `(10, )`. We can use the model for running a forward pass like so: ```py # Generate random inputs for the model. batch_size = 16 input_vector_dim = 10 random_inputs = tf.random.normal((batch_size, input_vector_dim)) # Run a forward pass. _ = model(random_inputs) ``` In order to run the forward pass with an XLA-compiled function, we’d need to do: ```py xla_fn = tf.function(model, jit_compile=True) _ = xla_fn(random_inputs) ``` The default `call()` function of the `model` is used for compiling the XLA graph. 
But if you want to compile any other model function into XLA, that is also possible:

```py
my_xla_fn = tf.function(model.my_xla_fn, jit_compile=True)
```

## Running a TF text generation model with XLA from 🤗 Transformers

To enable XLA-accelerated generation within 🤗 Transformers, you need to have a recent version of `transformers` installed. You can install it by running:

```bash
pip install transformers --upgrade
```

And then you can run the following code:

```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

# Will error if the minimal version of Transformers is not installed.
from transformers.utils import check_min_version

check_min_version("4.21.0")


tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left", pad_token="</s>")
model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
input_string = ["TensorFlow is"]

# One line to create an XLA generation function
xla_generate = tf.function(model.generate, jit_compile=True)

tokenized_input = tokenizer(input_string, return_tensors="tf")
generated_tokens = xla_generate(**tokenized_input, num_beams=2)

decoded_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
print(f"Generated -- {decoded_text}")
# Generated -- TensorFlow is an open-source, open-source, distributed-source application
# framework for the
```

As you can notice, enabling XLA on `generate()` is just a single line of code. The rest of the code remains unchanged. However, there are a couple of gotchas in the above code snippet that are specific to XLA. You need to be aware of those to realize the speed-ups that XLA can bring in. We discuss these in the following section.

## Gotchas to be aware of

When you are executing an XLA-enabled function (like `xla_generate()` above) for the first time, it will internally try to infer the computation graph, which is time-consuming. This process is known as ["tracing"](https://www.tensorflow.org/guide/intro_to_graphs#when_is_a_function_tracing), so the first generation call will be noticeably slow.

Successive calls of `xla_generate()` (or any other XLA-enabled function) won't have to infer the computation graph, given the inputs to the function follow the same shape with which the computation graph was initially built. While this is not a problem for modalities with fixed input shapes (e.g., images), you must pay attention if you are working with variable input shape modalities (e.g., text).

To ensure `xla_generate()` always operates with the same input shapes, you can specify the `padding` arguments when calling the tokenizer.

```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left", pad_token="</s>")
model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")
input_string = ["TensorFlow is"]

xla_generate = tf.function(model.generate, jit_compile=True)

# Here, we call the tokenizer with padding options.
tokenized_input = tokenizer(input_string, pad_to_multiple_of=8, padding=True, return_tensors="tf")

generated_tokens = xla_generate(**tokenized_input, num_beams=2)
decoded_text = tokenizer.decode(generated_tokens[0], skip_special_tokens=True)
print(f"Generated -- {decoded_text}")
```

This way, you can ensure that `xla_generate()` always receives inputs with the shape it was traced with, leading to speed-ups in generation time.
You can verify this with the code below:

```py
import time
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2", padding_side="left", pad_token="</s>")
model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

xla_generate = tf.function(model.generate, jit_compile=True)

for input_string in ["TensorFlow is", "TensorFlow is a", "TFLite is a"]:
    tokenized_input = tokenizer(input_string, pad_to_multiple_of=8, padding=True, return_tensors="tf")
    start = time.time_ns()
    generated_tokens = xla_generate(**tokenized_input, num_beams=2)
    end = time.time_ns()
    print(f"Execution time -- {(end - start) / 1e6:.1f} ms\n")
```

On a Tesla T4 GPU, you can expect outputs like so:

```bash
Execution time -- 30819.6 ms

Execution time -- 79.0 ms

Execution time -- 78.9 ms
```

The first call to `xla_generate()` is time-consuming because of tracing, but the successive calls are orders of magnitude faster. Keep in mind that any change in the generation options at any point will trigger re-tracing and thus lead to slow-downs in generation time.

We didn't cover all the text generation options 🤗 Transformers provides in this document. We encourage you to read the documentation for advanced use cases.

## Additional Resources

Here, we leave you with some additional resources if you want to delve deeper into XLA in 🤗 Transformers and in general.

* [This Colab Notebook](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/91_tf_xla_generate.ipynb) provides an interactive demonstration if you want to fiddle with the XLA-compatible encoder-decoder (like [T5](https://huggingface.co/docs/transformers/model_doc/t5)) and decoder-only (like [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)) text generation models.
* [This blog post](https://huggingface.co/blog/tf-xla-generate) provides an overview of the comparison benchmarks for XLA-compatible models along with a friendly introduction to XLA in TensorFlow.
* [This blog post](https://blog.tensorflow.org/2022/11/how-hugging-face-improved-text-generation-performance-with-xla.html) discusses our design philosophy behind adding XLA support to the TensorFlow models in 🤗 Transformers.
* Recommended posts for learning more about XLA and TensorFlow graphs in general:
    * [XLA: Optimizing Compiler for Machine Learning](https://www.tensorflow.org/xla)
    * [Introduction to graphs and tf.function](https://www.tensorflow.org/guide/intro_to_graphs)
    * [Better performance with tf.function](https://www.tensorflow.org/guide/function)
transformers/docs/source/en/tf_xla.md/0
{ "file_path": "transformers/docs/source/en/tf_xla.md", "repo_id": "transformers", "token_count": 2860 }
287
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Converting TensorFlow checkpoints

We provide a command-line interface (CLI) to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints into models that can be loaded using the `from_pretrained` methods of the library.

<Tip>

Since 2.3.0, the conversion script is part of the transformers CLI (**transformers-cli**), available in any transformers installation >= 2.3.0.

The documentation below reflects the format of the **transformers-cli convert** command.

</Tip>

## BERT

You can convert any TensorFlow checkpoint for BERT (in particular, [the pre-trained models released by Google](https://github.com/google-research/bert#pre-trained-models)) into a PyTorch file using the [convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py) script.

This CLI takes as input a TensorFlow checkpoint (three files starting with `bert_model.ckpt`) and the associated configuration file (`bert_config.json`), creates a PyTorch model for this configuration, loads the weights from the TensorFlow checkpoint into the PyTorch model, and saves the resulting model in a standard PyTorch file that can be imported using `from_pretrained()` (see the example in [Quick tour](quicktour), [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py)).

You only need to run this script **once** to convert a model to PyTorch. Afterwards, you can ignore the TensorFlow checkpoint (the three files starting with `bert_model.ckpt`), but make sure to keep the configuration file (`bert_config.json`) and the vocabulary file (`vocab.txt`), since these are also needed for the PyTorch model.

To run this script you will need to have TensorFlow and PyTorch installed (`pip install tensorflow`). The rest of the repository only requires PyTorch.

Here is an example of the conversion process for a pre-trained `BERT-Base Uncased` model:

```bash
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12

transformers-cli convert --model_type bert \
  --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
  --config $BERT_BASE_DIR/bert_config.json \
  --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/bert#pre-trained-models).
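Once converted, the resulting files can be loaded back like any other PyTorch checkpoint. The sketch below is a rough illustration; the directory layout is an assumption, presuming you gather `pytorch_model.bin`, the configuration renamed to `config.json`, and `vocab.txt` together in one folder:

```py
from transformers import BertForPreTraining, BertTokenizer

# Assumed layout: /path/to/converted contains pytorch_model.bin,
# config.json (the renamed bert_config.json), and vocab.txt
model = BertForPreTraining.from_pretrained("/path/to/converted")
tokenizer = BertTokenizer.from_pretrained("/path/to/converted")
```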
## ALBERT

Convert TensorFlow ALBERT model checkpoints to PyTorch using the [convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py) script.

The CLI takes as input a TensorFlow checkpoint (three files starting with `model.ckpt-best`) and the accompanying configuration file (`albert_config.json`), then creates and saves a PyTorch model. To run this conversion you will need to have TensorFlow and PyTorch installed.

Here is an example of the conversion process for a pre-trained `ALBERT Base` model:

```bash
export ALBERT_BASE_DIR=/path/to/albert/albert_base

transformers-cli convert --model_type albert \
  --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
  --config $ALBERT_BASE_DIR/albert_config.json \
  --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/albert#pre-trained-models).

## OpenAI GPT

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming your NumPy checkpoint is saved in the same format as the OpenAI pre-trained model (more information [here](https://github.com/openai/finetune-transformer-lm)):

```bash
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

transformers-cli convert --model_type gpt \
  --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT_CONFIG] \
  [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]
```

## OpenAI GPT-2

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (more information [here](https://github.com/openai/gpt-2)):

```bash
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/openai-community/gpt2/pretrained/weights

transformers-cli convert --model_type gpt2 \
  --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT2_CONFIG] \
  [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
```

## XLNet

Here is an example of the conversion process for a pre-trained XLNet model:

```bash
export XLNET_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export XLNET_CONFIG_PATH=/path/to/xlnet/config

transformers-cli convert --model_type xlnet \
  --tf_checkpoint $XLNET_CHECKPOINT_PATH \
  --config $XLNET_CONFIG_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--finetuning_task_name XLNET_FINETUNED_TASK]
```

## XLM

Here is an example of the conversion process for a pre-trained XLM model:

```bash
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

transformers-cli convert --model_type xlm \
  --tf_checkpoint $XLM_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config XLM_CONFIG] \
  [--finetuning_task_name XLM_FINETUNED_TASK]
```

## T5

Here is an example of the conversion process for a pre-trained T5 model:

```bash
export T5=/path/to/t5/uncased_L-12_H-768_A-12

transformers-cli convert --model_type t5 \
  --tf_checkpoint $T5/t5_model.ckpt \
  --config $T5/t5_config.json \
  --pytorch_dump_output $T5/pytorch_model.bin
```
transformers/docs/source/es/converting_tensorflow_models.md/0
{ "file_path": "transformers/docs/source/es/converting_tensorflow_models.md", "repo_id": "transformers", "token_count": 2424 }
288
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Using pipelines for a webserver

<Tip>

Creating an inference engine is a complex topic, and the "best" solution will most likely depend on your use case. Are you on CPU or GPU? Do you want the lowest latency, the highest throughput, support for many models, or just to highly optimize one specific model? There are many ways to tackle this topic, so what we are going to present here is a good default to get started, which will not necessarily be the most optimal solution for you.

</Tip>

The key thing to understand is that we can use an iterator, just like [on a dataset](pipeline_tutorial#uso-de-pipelines-en-un-conjunto-de-datos), since a webserver is basically a system that waits for requests and handles them as they come in.

Usually webservers are multiplexed (multithreaded, async, etc.) to handle several requests concurrently. Pipelines (and mostly the underlying models), on the other hand, are not really ideal for parallelism; they consume a lot of RAM, so it is best to give them all the available resources while they are running, or it is a compute-intensive job.

We are going to solve this by having the webserver handle the light load of receiving and sending requests, and having a single worker handle the actual work. This example is going to use `starlette`. The framework is not really important, but you might have to tune or change the code if you are using another one to achieve the same effect.

Create `server.py`:

```py
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
from transformers import pipeline
import asyncio


async def homepage(request):
    payload = await request.body()
    string = payload.decode("utf-8")
    response_q = asyncio.Queue()
    await request.app.model_queue.put((string, response_q))
    output = await response_q.get()
    return JSONResponse(output)


async def server_loop(q):
    pipe = pipeline(model="google-bert/bert-base-uncased")
    while True:
        (string, response_q) = await q.get()
        out = pipe(string)
        await response_q.put(out)


app = Starlette(
    routes=[
        Route("/", homepage, methods=["POST"]),
    ],
)


@app.on_event("startup")
async def startup_event():
    q = asyncio.Queue()
    app.model_queue = q
    asyncio.create_task(server_loop(q))
```

Now you can start it with:

```bash
uvicorn server:app
```

And you can query it with:

```bash
curl -X POST -d "test [MASK]" http://localhost:8000/
#[{"score":0.7742936015129089,"token":1012,"token_str":".","sequence":"test."},...]
```

And there you go, now you have a good idea of how to create a webserver!

What is really important is to load the model only **once**, so there are no copies of the model on the webserver. That way, no unnecessary RAM is used. The queuing mechanism then allows you to do fancy things like accumulating a few items before running inference, to use dynamic batching:

<Tip warning={true}>

The code sample below is intentionally written like pseudocode for readability. Do not run it without checking whether it makes sense for your system resources!
</Tip>

```py
# Block until the first request arrives, then keep it for the batch
(string, rq) = await q.get()
strings = [string]
queues = [rq]
while True:
    try:
        # Gather whatever else is already waiting, up to a short timeout
        (string, rq) = await asyncio.wait_for(q.get(), timeout=0.001)  # 1ms
    except asyncio.exceptions.TimeoutError:
        break
    strings.append(string)
    queues.append(rq)
# Run one batched inference, then dispatch each result back to its caller
outs = pipe(strings, batch_size=len(strings))
for rq, out in zip(queues, outs):
    await rq.put(out)
```

Again, the proposed code is optimized for readability, not for being the best code. First of all, there is no batch size limit, which is usually not a good idea. Next, the timeout is reset on every queue fetch, which means you could wait much more than 1ms before running the inference (delaying the first request by that much). It would be better to have a single 1ms deadline. Also, this will always wait 1ms even if the queue is empty, which might not be the best since you probably want to start running inference if there is nothing in the queue. But maybe it does make sense if batching is really crucial for your use case. Again, there is no single best solution.

## A few things you might want to consider

### Error checking

There is a lot that can go wrong in production: out of memory, out of disk space, loading the model might fail, the query might be wrong, the query might be correct but still fail because of a model misconfiguration, and so on. Generally, it is good if the server surfaces these errors to the user, so adding plenty of `try..except` blocks to report them is a good idea. Keep in mind, though, that depending on your security context, revealing all those errors can also be a security risk.

### Circuit breaking

Webservers usually behave better when they do circuit breaking. This means they return proper errors when they are overloaded, instead of just waiting on the query indefinitely: return a 503 error instead of waiting a very long time, or a 504 after a long time. This is relatively easy to implement in the proposed code since there is a single queue. Looking at the queue size is a basic way to start returning errors before your webserver fails under load (see the sketch at the end of this document).

### Blocking the main thread

Currently, PyTorch is not async aware, and computation will block the main thread while running. That means it would be better if PyTorch were forced to run on its own thread/process. This was not done here because the code gets a lot more complex (mostly because threads, async, and queues do not play well together). But ultimately it does the same thing. This would be important if the inference of single items were long (> 1s), because in that case every query during inference would have to wait 1s before even receiving an error.

### Dynamic batching

In general, batching is not necessarily an improvement over passing one item at a time (see [batching](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching) for more information). But it can be very effective when used in the right setting. In the API, there is no dynamic batching by default (too much opportunity for a slowdown).
But for BLOOM inference, a very large model, dynamic batching is **essential** to provide a decent experience for everybody.
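As a concrete illustration of the circuit-breaking idea above, here is a minimal sketch of a `homepage` handler that rejects requests once the queue grows too large. It assumes the Starlette `server.py` from earlier; the `QUEUE_SIZE_LIMIT` threshold is a hypothetical value you would tune for your hardware:

```python
import asyncio

from starlette.responses import JSONResponse

QUEUE_SIZE_LIMIT = 32  # hypothetical threshold; tune for your workload


async def homepage(request):
    # Fail fast with a proper error instead of letting requests pile up
    if request.app.model_queue.qsize() >= QUEUE_SIZE_LIMIT:
        return JSONResponse({"error": "server overloaded, try again later"}, status_code=503)

    payload = await request.body()
    string = payload.decode("utf-8")
    response_q = asyncio.Queue()
    await request.app.model_queue.put((string, response_q))
    output = await response_q.get()
    return JSONResponse(output)
```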
transformers/docs/source/es/pipeline_webserver.md/0
{ "file_path": "transformers/docs/source/es/pipeline_webserver.md", "repo_id": "transformers", "token_count": 2575 }
289
- sections:
  - local: index
    title: 🤗 Transformers
  - local: quicktour
    title: Quick tour
  - local: installation
    title: Installation
  title: Get started
- sections:
  - local: pipeline_tutorial
    title: Pipelines for inference
  - local: autoclass_tutorial
    title: Load pretrained instances with an AutoClass
  - local: preprocessing
    title: Preprocess
  - local: training
    title: Fine-tune a pretrained model
  - local: accelerate
    title: Distributed training with 🤗 Accelerate
  - local: model_sharing
    title: Share a model
  title: Tutorials
- sections:
  - local: create_a_model
    title: Create a custom architecture
  - local: custom_models
    title: Share custom models
  - local: run_scripts
    title: Train with a script
  - local: multilingual
    title: Multilingual models for inference
  - local: converting_tensorflow_models
    title: Convert TensorFlow models
  - local: serialization
    title: Export Transformers models
  - local: perf_train_cpu
    title: Efficient training on CPU
  - local: perf_train_cpu_many
    title: Efficient training on multiple CPUs
  - local: perf_train_tpu
    title: Training on TPUs
  - local: perf_train_special
    title: Training on specialized hardware
  - local: perf_infer_cpu
    title: Efficient inference on CPU
  - local: perf_infer_gpu_one
    title: Inference on one GPU
  - local: perf_infer_gpu_many
    title: Efficient inference on multiple GPUs
  - local: perf_infer_special
    title: Inference on specialized hardware
  - local: big_models
    title: Instantiating a big model
  - local: migration
    title: Migrating from previous packages
  - local: debugging
    title: Debugging
  title: Practical guides
- sections:
  - local: add_new_pipeline
    title: How to add a pipeline to 🤗 Transformers?
  - local: add_new_model
    title: How to add a model to 🤗 Transformers?
  - local: perf_hardware
    title: Optimized hardware for training
  - local: community
    title: Community resources
  - local: pr_checks
    title: Checks on a Pull Request
  title: How-to guides
transformers/docs/source/it/_toctree.yml/0
{ "file_path": "transformers/docs/source/it/_toctree.yml", "repo_id": "transformers", "token_count": 771 }
290
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Optimized hardware for training

The hardware you use to run model training and inference can have a big effect on performance. For an in-depth analysis of GPUs, be sure to check out Tim Dettmers' excellent [blog post](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/).

Let's have a look at some practical advice for GPU setups.

## GPU

When training bigger models, you have essentially three options:

- bigger GPUs
- more GPUs
- more CPU and NVMe (offloaded to by [DeepSpeed-Infinity](main_classes/deepspeed#nvme-support))

Let's start with the case where you have a single GPU.

### Power and Cooling

If you bought an expensive high-end GPU, make sure to give it the correct power and sufficient cooling.

**Power**: Some high-end consumer GPU cards have 2, and sometimes 3, PCI-E 8-pin power sockets. Make sure you have as many independent 12V PCI-E 8-pin cables plugged into the card as there are sockets. Do not use the 2 splits at one end of the same cable (also known as a pigtail cable). That is, if you have 2 sockets on the GPU, you want 2 PCI-E 8-pin cables going from your PSU to the card, and not one that has 2 PCI-E 8-pin connectors at the end! Otherwise you will not get the full rated performance out of the card.

Each PCI-E 8-pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power. Some other cards may use PCI-E 12-pin connectors, and these can deliver up to 500-600W of power. Low-end cards may use 6-pin connectors, which supply up to 75W of power.

Additionally, you want a high-end PSU that has stable voltage. Some lower-quality PSUs may not give the card the stable voltage it needs to run at its peak. And of course, the PSU needs to have enough unused Watts to power the card.

**Cooling**: When a GPU gets overheated, it starts throttling and will not deliver full performance; it can even shut down if it gets too hot. It is hard to say the exact best temperature to aim for when a GPU is heavily loaded, but probably anything under +80°C is fine, and lower is better; perhaps 70-75°C is an excellent range to be in. Throttling is likely to start at around 84-90°C. But beyond throttling performance, a prolonged very high temperature is likely to reduce the lifespan of a GPU.
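If you want to keep an eye on these numbers while a training job runs, `nvidia-smi` can report them in a loop. The query fields below are standard; the 1-second interval is just an example:

```bash
# Print index, temperature, power draw and utilization once per second
nvidia-smi --query-gpu=index,temperature.gpu,power.draw,utilization.gpu \
  --format=csv -l 1
```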
Next, let's have a look at one of the most important aspects when you have multiple GPUs: connectivity.

### Multi-GPU Connectivity

If you use multiple GPUs, the way the cards are interconnected can have a huge impact on the total training time. If the GPUs are on the same physical node, you can run:

```bash
nvidia-smi topo -m
```

and it will tell you how the GPUs are interconnected. On a machine with dual GPUs connected with NVLink, you will most likely see something like:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      NV2     0-23            N/A
GPU1    NV2      X      0-23            N/A
```

On a different machine without NVLink, we may see:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      PHB     0-11            N/A
GPU1    PHB      X      0-11            N/A
```

The report includes this legend:

```
  X    = Self
  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
  PIX  = Connection traversing at most a single PCIe bridge
  NV#  = Connection traversing a bonded set of # NVLinks
```

So the first report, `NV2`, tells us the GPUs are interconnected with 2 NVLinks, while in the second report, `PHB`, we have a typical consumer-level PCIe+Bridge setup.

Check what kind of connectivity you have on your setup. Some of these will make communication between cards faster (e.g. NVLink), others slower (e.g. PHB).

Depending on the type of scalability solution used, the connectivity speed can have a major or a minor impact. If the GPUs need to sync rarely, as in DDP, the impact of a slower connection will be less significant. If the GPUs need to exchange messages often, as in ZeRO-DP, faster connectivity becomes extremely important to achieve faster training.

#### NVLink

[NVLink](https://en.wikipedia.org/wiki/NVLink) is a wire-based serial multi-lane near-range communications link developed by Nvidia. Each new generation provides faster bandwidth; for example, here is a quote from the [Nvidia Ampere GA102 GPU Architecture](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf) whitepaper:

> Third-Generation NVLink®
> GA102 GPUs utilize NVIDIA's third-generation NVLink interface, which includes four x4 links,
> with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four
> links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth
> between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink.
> (Note that 3-Way and 4-Way SLI configurations are not supported.)

So the higher the `X` in the `NVX` report from the output of `nvidia-smi topo -m`, the better. The generation will depend on your GPU architecture.

Let's compare a training run of the openai-community/gpt2 language model over a small sample of wikitext.

The results are:

| NVlink | Time |
| ------ | ---: |
| Y      | 101s |
| N      | 131s |

You can see that NVLink completes the training about 23% faster. In the second benchmark, we use `NCCL_P2P_DISABLE=1` to tell the GPUs not to use NVLink.
Here is the full benchmark code and outputs:

```bash
# DDP w/ NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69}

# DDP w/o NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}
```

Hardware: 2x TITAN RTX 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`)

Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
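If you would rather check peer-to-peer connectivity from Python than parse `nvidia-smi` output, PyTorch exposes a simple query for it. A minimal sketch; note that this reports P2P capability in general (over NVLink or PCIe), not specifically whether the link is NVLink:

```python
import torch

# Check whether each pair of visible GPUs can talk to each other directly,
# which NCCL relies on for fast collectives
n = torch.cuda.device_count()
for i in range(n):
    for j in range(n):
        if i != j:
            ok = torch.cuda.can_device_access_peer(i, j)
            print(f"GPU{i} -> GPU{j}: peer access {'available' if ok else 'unavailable'}")
```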
transformers/docs/source/it/perf_hardware.md/0
{ "file_path": "transformers/docs/source/it/perf_hardware.md", "repo_id": "transformers", "token_count": 3024 }
291
- sections:
  - local: index
    title: 🤗 Transformers
  - local: quicktour
    title: Quick tour
  - local: installation
    title: Installation
  title: Get started
- sections:
  - local: pipeline_tutorial
    title: Run inference with pipelines
  - local: autoclass_tutorial
    title: Write portable code with AutoClass
  - local: preprocessing
    title: Preprocess data
  - local: training
    title: Fine-tune a pretrained model
  - local: run_scripts
    title: Train with a script
  - local: accelerate
    title: Set up distributed training with 🤗 Accelerate
  - local: peft
    title: Load and train adapters with 🤗 PEFT
  - local: model_sharing
    title: Share a model
  - local: transformers_agents
    title: Agents
  - local: llm_tutorial
    title: Generation with LLMs
  title: Tutorials
- sections:
  - isExpanded: false
    sections:
    - local: tasks/sequence_classification
      title: Text classification
    - local: tasks/token_classification
      title: Token classification
    - local: tasks/question_answering
      title: Question answering
    - local: tasks/language_modeling
      title: Causal language modeling
    - local: tasks/masked_language_modeling
      title: Masked language modeling
    - local: tasks/translation
      title: Translation
    - local: tasks/summarization
      title: Summarization
    - local: tasks/multiple_choice
      title: Multiple choice
    title: Natural Language Processing
  - isExpanded: false
    sections:
    - local: tasks/audio_classification
      title: Audio classification
    - local: tasks/asr
      title: Automatic speech recognition
    title: Audio
  - isExpanded: false
    sections:
    - local: tasks/image_classification
      title: Image classification
    - local: tasks/semantic_segmentation
      title: Semantic segmentation
    - local: tasks/video_classification
      title: Video classification
    - local: tasks/object_detection
      title: Object detection
    - local: tasks/zero_shot_object_detection
      title: Zero-shot object detection
    - local: tasks/zero_shot_image_classification
      title: Zero-shot image classification
    - local: tasks/monocular_depth_estimation
      title: Depth estimation
    - local: tasks/image_to_image
      title: Image-to-Image
    - local: tasks/knowledge_distillation_for_image_classification
      title: Knowledge distillation for computer vision
    title: Computer Vision
  - isExpanded: false
    sections:
    - local: tasks/image_captioning
      title: Image captioning
    - local: tasks/document_question_answering
      title: Document question answering
    - local: tasks/visual_question_answering
      title: Visual question answering
    - local: tasks/text-to-speech
      title: Text to speech
    title: Multimodal
  - isExpanded: false
    sections:
    - local: generation_strategies
      title: Customize the generation strategy
    title: Generation
  - isExpanded: false
    sections:
    - local: tasks/idefics
      title: Image tasks with IDEFICS
    - local: tasks/prompting
      title: LLM prompting guide
    title: Prompting
  title: Task Guides
- sections:
  - local: fast_tokenizers
    title: Use fast tokenizers from 🤗 Tokenizers
  - local: multilingual
    title: Run inference with multilingual models
  - local: create_a_model
    title: Use model-specific APIs
  - local: custom_models
    title: Share a custom model
  - local: chat_templating
    title: Templates for chat models
  - local: serialization
    title: Export to ONNX
  - local: tflite
    title: Export to TFLite
  - local: torchscript
    title: Export to TorchScript
  - local: benchmarks
    title: Benchmarks
  - local: community
    title: Community resources
  - local: custom_tools
    title: Custom Tools and Prompts
  - local: troubleshooting
    title: Troubleshoot
  title: Developer guides
- sections:
  - local: performance
    title: Overview
  - sections:
    - local: perf_train_gpu_one
      title: Methods and tools for efficient training on a single GPU
    - local: perf_train_gpu_many
      title: Multiple GPUs and parallelism
    - local: perf_train_cpu
      title: Efficient training on CPU
    - local: perf_train_cpu_many
      title: Distributed CPU training
    - local: perf_train_tpu
      title: Training on TPUs
    - local: perf_train_tpu_tf
      title: Training on TPU with TensorFlow
    - local: perf_train_special
      title: Training on specialized hardware
    - local: perf_hardware
      title: Custom hardware for training
    - local: hpo_train
      title: Hyperparameter search using Trainer API
    title: Efficient training techniques
  - sections:
    - local: perf_infer_cpu
      title: Inference on CPU
    - local: perf_infer_gpu_one
      title: Inference on one GPU
    - local: perf_infer_gpu_many
      title: Inference on many GPUs
    - local: perf_infer_special
      title: Inference on specialized hardware
    title: Optimizing inference
  - local: big_models
    title: Instantiating a big model
  - local: tf_xla
    title: XLA Integration for TensorFlow Models
  - local: perf_torch_compile
    title: Optimize inference using torch.compile()
  title: Performance and scalability
- sections:
  - local: add_new_model
    title: How to add a model to 🤗 Transformers?
  - local: testing
    title: Testing
  - local: pr_checks
    title: Checks on a Pull Request
  title: Contribute
- sections:
  - local: philosophy
    title: Philosophy
  - local: glossary
    title: Glossary
  - local: task_summary
    title: What 🤗 Transformers can do
  - local: tasks_explained
    title: How 🤗 Transformers solve tasks
  - local: model_summary
    title: The Transformer model family
  - local: tokenizer_summary
    title: Summary of the tokenizers
  - local: attention
    title: Attention mechanisms
  - local: pad_truncation
    title: Padding and truncation
  - local: bertology
    title: BERTology
  - local: perplexity
    title: Perplexity of fixed-length models
  - local: pipeline_webserver
    title: Pipelines for webserver inference
  - local: model_memory_anatomy
    title: Model training anatomy
  title: Conceptual guides
- sections:
  - sections:
    - local: main_classes/agent
      title: Agents and Tools
    - local: model_doc/auto
      title: Auto Classes
    - local: main_classes/callback
      title: Callbacks
    - local: main_classes/configuration
      title: Configuration
    - local: main_classes/data_collator
      title: Data Collator
    - local: main_classes/keras_callbacks
      title: Keras callbacks
    - local: main_classes/logging
      title: Logging
    - local: main_classes/model
      title: Models
    - local: main_classes/text_generation
      title: Text Generation
    - local: main_classes/onnx
      title: ONNX
    - local: main_classes/optimizer_schedules
      title: Optimization
    - local: main_classes/output
      title: Model outputs
    - local: main_classes/pipelines
      title: Pipelines
    - local: main_classes/processors
      title: Processors
    - local: main_classes/quantization
      title: Quantization
    - local: main_classes/tokenizer
      title: Tokenizer
    - local: main_classes/trainer
      title: Trainer
    - local: main_classes/deepspeed
      title: DeepSpeed Integration
    - local: main_classes/feature_extractor
      title: Feature Extractor
    - local: main_classes/image_processor
      title: Image Processor
    title: Main Classes
  - sections:
    - isExpanded: false
      sections:
      - local: model_doc/albert
        title: ALBERT
      - local: model_doc/bart
        title: BART
      - local: model_doc/barthez
        title: BARThez
      - local: model_doc/bartpho
        title: BARTpho
      - local: model_doc/bert
        title: BERT
      - local: model_doc/bert-generation
        title: BertGeneration
      - local: model_doc/bert-japanese
        title: BertJapanese
      - local: model_doc/bertweet
        title: Bertweet
      - local: model_doc/big_bird
        title: BigBird
      - local: model_doc/bigbird_pegasus
        title: BigBirdPegasus
      - local: model_doc/biogpt
        title: BioGpt
      - local: model_doc/blenderbot
        title: Blenderbot
      - local: model_doc/blenderbot-small
        title: Blenderbot Small
      - local: model_doc/bloom
        title: BLOOM
      - local: model_doc/bort
        title: BORT
      - local: model_doc/byt5
        title: ByT5
      - local: model_doc/camembert
        title: CamemBERT
      - local: model_doc/canine
        title: CANINE
      - local: model_doc/codegen
        title: CodeGen
      - local: model_doc/code_llama
        title: CodeLlama
      - local: model_doc/convbert
        title: ConvBERT
      - local: model_doc/cpm
        title: CPM
      - local: model_doc/cpmant
        title: CPMANT
      - local: model_doc/ctrl
        title: CTRL
      - local: model_doc/deberta
        title: DeBERTa
      - local: model_doc/deberta-v2
        title: DeBERTa-v2
      - local: model_doc/dialogpt
        title: DialoGPT
      title: Text models
    - isExpanded: false
      sections:
      - local: model_doc/beit
        title: BEiT
      - local: model_doc/bit
        title: BiT
      - local: model_doc/conditional_detr
        title: Conditional DETR
      - local: model_doc/convnext
        title: ConvNeXT
      - local: model_doc/convnextv2
        title: ConvNeXTV2
      - local: model_doc/cvt
        title: CvT
      - local: model_doc/deformable_detr
        title: Deformable DETR
      - local: model_doc/deit
        title: DeiT
      - local: model_doc/deta
        title: DETA
      - local: model_doc/detr
        title: DETR
      - local: model_doc/dinat
        title: DiNAT
      title: Vision models
    - isExpanded: false
      sections:
      - local: model_doc/audio-spectrogram-transformer
        title: Audio Spectrogram Transformer
      - local: model_doc/bark
        title: Bark
      - local: model_doc/clap
        title: CLAP
      title: Audio models
    - isExpanded: false
      sections:
      - local: model_doc/align
        title: ALIGN
      - local: model_doc/altclip
        title: AltCLIP
      - local: model_doc/blip
        title: BLIP
      - local: model_doc/blip-2
        title: BLIP-2
      - local: model_doc/bridgetower
        title: BridgeTower
      - local: model_doc/bros
        title: BROS
      - local: model_doc/chinese_clip
        title: Chinese-CLIP
      - local: model_doc/clip
        title: CLIP
      - local: model_doc/clipseg
        title: CLIPSeg
      - local: model_doc/clvp
        title: CLVP
      - local: model_doc/data2vec
        title: Data2Vec
      - local: model_doc/deplot
        title: DePlot
      title: Multimodal models
    - isExpanded: false
      sections:
      - local: model_doc/decision_transformer
        title: Decision Transformer
      title: Reinforcement learning models
    - isExpanded: false
      sections:
      - local: model_doc/autoformer
        title: Autoformer
      title: Time series models
    title: Models
  - sections:
    - local: internal/modeling_utils
      title: Custom Layers and Utilities
    - local: internal/pipelines_utils
      title: Utilities for pipelines
    - local: internal/tokenization_utils
      title: Utilities for Tokenizers
    - local: internal/trainer_utils
      title: Utilities for Trainer
    - local: internal/generation_utils
      title: Utilities for Generation
    - local: internal/image_processing_utils
      title: Utilities for Image Processors
    - local: internal/audio_utils
      title: Utilities for Audio processing
    - local: internal/file_utils
      title: General Utilities
    - local: internal/time_series_utils
      title: Utilities for Time Series
    title: Internal Helpers
  title: API
transformers/docs/source/ja/_toctree.yml/0
{ "file_path": "transformers/docs/source/ja/_toctree.yml", "repo_id": "transformers", "token_count": 5783 }
292
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Hyperparameter Search using Trainer API

🤗 Transformers provides a [`Trainer`] class optimized for training 🤗 Transformers models, making it easier to start training without manually writing your own training loop. The [`Trainer`] also provides an API for hyperparameter search. This document shows how to enable it with an example.

## Hyperparameter Search backend

[`Trainer`] currently supports four hyperparameter search backends: [optuna](https://optuna.org/), [sigopt](https://sigopt.com/), [raytune](https://docs.ray.io/en/latest/tune/index.html), and [wandb](https://wandb.ai/site/sweeps).

You should install the hyperparameter search backend before using it:

```bash
pip install optuna/sigopt/wandb/ray[tune]
```

## How to enable Hyperparameter search in example

Define the hyperparameter search space; different backends need different formats.

For sigopt, see sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter); it looks like the following:

```py
>>> def sigopt_hp_space(trial):
...     return [
...         {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"},
...         {
...             "categorical_values": ["16", "32", "64", "128"],
...             "name": "per_device_train_batch_size",
...             "type": "categorical",
...         },
...     ]
```

For optuna, see [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py); it looks like the following:

```py
>>> def optuna_hp_space(trial):
...     return {
...         "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
...         "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]),
...     }
```

Optuna provides multi-objective hyperparameter optimization (HPO). You can pass `direction` to `hyperparameter_search` and define your own `compute_objective` to return multiple objective values. The Pareto Front (`List[BestRun]`) will be returned by `hyperparameter_search`; you should refer to the test case `TrainerHyperParameterMultiObjectOptunaIntegrationTest` in [test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py). It looks like the following:

```py
>>> best_trials = trainer.hyperparameter_search(
...     direction=["minimize", "maximize"],
...     backend="optuna",
...     hp_space=optuna_hp_space,
...     n_trials=20,
...     compute_objective=compute_objective,
... )
```

For raytune, see [object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html); it looks like the following:

```py
>>> def ray_hp_space(trial):
...     return {
...         "learning_rate": tune.loguniform(1e-6, 1e-4),
...         "per_device_train_batch_size": tune.choice([16, 32, 64, 128]),
...     }
```

For wandb, see [object_parameter](https://docs.wandb.ai/guides/sweeps/configuration); it looks like the following:

```py
>>> def wandb_hp_space(trial):
...     return {
...         "method": "random",
...         "metric": {"name": "objective", "goal": "minimize"},
...         "parameters": {
...             "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
...             "per_device_train_batch_size": {"values": [16, 32, 64, 128]},
...         },
...     }
```

Define a `model_init` function and pass it to the [`Trainer`]. For example:

```py
>>> def model_init(trial):
...     return AutoModelForSequenceClassification.from_pretrained(
...         model_args.model_name_or_path,
...         from_tf=bool(".ckpt" in model_args.model_name_or_path),
...         config=config,
...         cache_dir=model_args.cache_dir,
...         revision=model_args.model_revision,
...         token=True if model_args.use_auth_token else None,
...     )
```

Create a [`Trainer`] with your `model_init` function, training arguments, training and test datasets, and evaluation function:

```py
>>> trainer = Trainer(
...     model=None,
...     args=training_args,
...     train_dataset=small_train_dataset,
...     eval_dataset=small_eval_dataset,
...     compute_metrics=compute_metrics,
...     tokenizer=tokenizer,
...     model_init=model_init,
...     data_collator=data_collator,
... )
```

Call the hyperparameter search to get the best trial parameters. The backend can be `"optuna"`/`"sigopt"`/`"wandb"`/`"ray"`. `direction` is `"minimize"` or `"maximize"`, indicating whether the objective should be made smaller or larger.

You can also define your own `compute_objective` function. If it is not defined, the default `compute_objective` is called, and the sum of evaluation metrics such as F1 is returned as the objective value.

```py
>>> best_trial = trainer.hyperparameter_search(
...     direction="maximize",
...     backend="optuna",
...     hp_space=optuna_hp_space,
...     n_trials=20,
...     compute_objective=compute_objective,
... )
```

## Hyperparameter search For DDP finetune

Currently, hyperparameter search for DDP (Distributed Data Parallel) is enabled for optuna and sigopt. Only the rank-zero process generates the search trials and passes the arguments to the other ranks.
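The guide above refers to a custom `compute_objective` without showing one. Here is a minimal sketch; the metric keys are hypothetical and depend on what your `compute_metrics` function reports:

```python
def compute_objective(metrics: dict) -> float:
    # `metrics` is the dict returned by trainer.evaluate(),
    # e.g. {"eval_loss": ..., "eval_f1": ...}
    return metrics["eval_f1"]  # single objective: maximize F1


def multi_compute_objective(metrics: dict) -> list:
    # For multi-objective optuna search, return one value per entry in
    # `direction`, e.g. direction=["minimize", "maximize"]
    return [metrics["eval_loss"], metrics["eval_f1"]]
```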
transformers/docs/source/ja/hpo_train.md/0
{ "file_path": "transformers/docs/source/ja/hpo_train.md", "repo_id": "transformers", "token_count": 2841 }
293
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# ALBERT

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=albert">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-albert-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/albert-base-v2">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>

## Overview

The ALBERT model was proposed in [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942) by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. It presents two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT:

- Splitting the embedding matrix into two smaller matrices.
- Using repeating layers split among groups.

The abstract from the paper is the following:

*Increasing model size when pretraining natural language representations often results in improved performance on downstream tasks. However, at some point further model increases become harder due to GPU/TPU memory limitations, longer training times, and unexpected model degradation. To address these problems, we present two parameter-reduction techniques to lower memory consumption and increase the training speed of BERT. Comprehensive empirical evidence shows that our proposed methods lead to models that scale much better compared to the original BERT. We also use a self-supervised loss that focuses on modeling inter-sentence coherence, and show it consistently helps downstream tasks with multi-sentence inputs. As a result, our best model establishes new state-of-the-art results on the GLUE, RACE, and SQuAD benchmarks while having fewer parameters compared to BERT-large.*

This model was contributed by [lysandre](https://huggingface.co/lysandre). The jax version of this model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT).

## Usage tips

- ALBERT is a model with absolute position embeddings, so it is usually advised to pad the inputs on the right rather than the left.
- ALBERT uses repeating layers which results in a small memory footprint; however, the computational cost is similar to a BERT-like architecture with the same number of hidden layers, as it has to iterate through the same number of (repeating) layers.
- Embedding size E is different from hidden size H, which is justified because the embeddings are context independent (one embedding vector represents one token), whereas the hidden states are context dependent (one hidden state represents a sequence of tokens), so it is more logical to have H >> E. Also, the embedding matrix is large, since it is V x E (V being the vocab size). If E < H, it has fewer parameters.
- Layers are split into groups that share parameters (to save memory). Next sentence prediction (NSP) is replaced by sentence-order prediction: in the inputs, we have two sentences A and B (that are consecutive), and we either feed A followed by B or B followed by A. The model must predict whether they have been swapped or not.

## Resources

- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)

## AlbertConfig

[[autodoc]] AlbertConfig

## AlbertTokenizer

[[autodoc]] AlbertTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## AlbertTokenizerFast

[[autodoc]] AlbertTokenizerFast

## Albert specific outputs

[[autodoc]] models.albert.modeling_albert.AlbertForPreTrainingOutput

[[autodoc]] models.albert.modeling_tf_albert.TFAlbertForPreTrainingOutput

<frameworkcontent>
<pt>

## AlbertModel

[[autodoc]] AlbertModel
    - forward

## AlbertForPreTraining

[[autodoc]] AlbertForPreTraining
    - forward

## AlbertForMaskedLM

[[autodoc]] AlbertForMaskedLM
    - forward

## AlbertForSequenceClassification
[[autodoc]] AlbertForSequenceClassification
    - forward

## AlbertForMultipleChoice

[[autodoc]] AlbertForMultipleChoice

## AlbertForTokenClassification

[[autodoc]] AlbertForTokenClassification
    - forward

## AlbertForQuestionAnswering

[[autodoc]] AlbertForQuestionAnswering
    - forward

</pt>
<tf>

## TFAlbertModel

[[autodoc]] TFAlbertModel
    - call

## TFAlbertForPreTraining

[[autodoc]] TFAlbertForPreTraining
    - call

## TFAlbertForMaskedLM

[[autodoc]] TFAlbertForMaskedLM
    - call

## TFAlbertForSequenceClassification

[[autodoc]] TFAlbertForSequenceClassification
    - call

## TFAlbertForMultipleChoice

[[autodoc]] TFAlbertForMultipleChoice
    - call

## TFAlbertForTokenClassification

[[autodoc]] TFAlbertForTokenClassification
    - call

## TFAlbertForQuestionAnswering

[[autodoc]] TFAlbertForQuestionAnswering
    - call

</tf>
<jax>

## FlaxAlbertModel

[[autodoc]] FlaxAlbertModel
    - __call__

## FlaxAlbertForPreTraining

[[autodoc]] FlaxAlbertForPreTraining
    - __call__

## FlaxAlbertForMaskedLM

[[autodoc]] FlaxAlbertForMaskedLM
    - __call__

## FlaxAlbertForSequenceClassification

[[autodoc]] FlaxAlbertForSequenceClassification
    - __call__

## FlaxAlbertForMultipleChoice

[[autodoc]] FlaxAlbertForMultipleChoice
    - __call__

## FlaxAlbertForTokenClassification

[[autodoc]] FlaxAlbertForTokenClassification
    - __call__

## FlaxAlbertForQuestionAnswering

[[autodoc]] FlaxAlbertForQuestionAnswering
    - __call__

</jax>
</frameworkcontent>
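As a quick sanity check of the usage tips above (right padding, repeating layers), here is a minimal inference sketch; `albert-base-v2` is the standard Hub checkpoint name, and the batch is padded on the right by the tokenizer's default:

```python
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("albert-base-v2")  # pads on the right by default
model = AutoModel.from_pretrained("albert-base-v2")

batch = tokenizer(["Hello world", "A longer example sentence"], padding=True, return_tensors="pt")
with torch.no_grad():
    outputs = model(**batch)
print(outputs.last_hidden_state.shape)  # (batch, seq_len, hidden_size=768)
```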
transformers/docs/source/ja/model_doc/albert.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/albert.md", "repo_id": "transformers", "token_count": 2960 }
294
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# BigBirdPegasus

## Overview

The BigBird model was proposed in [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Zaheer, Manzil and Guruganesh, Guru and Dubey, Kumar Avinava and Ainslie, Joshua and Alberti, Chris and Ontanon, Santiago and Pham, Philip and Ravula, Anirudh and Wang, Qifan and Yang, Li and others. BigBird is a sparse-attention based transformer which extends Transformer-based models, such as BERT, to much longer sequences. In addition to sparse attention, BigBird also applies global attention as well as random attention to the input sequence. Theoretically, it has been shown that applying sparse, global, and random attention approximates full attention, while being computationally much more efficient for longer sequences. As a consequence of the capability to handle longer context, BigBird has shown improved performance on various long document NLP tasks, such as question answering and summarization, compared to BERT or RoBERTa.

The abstract from the paper is the following:

*Transformers-based models, such as BERT, have been one of the most successful deep learning models for NLP. Unfortunately, one of their core limitations is the quadratic dependency (mainly in terms of memory) on the sequence length due to their full attention mechanism. To remedy this, we propose BigBird, a sparse attention mechanism that reduces this quadratic dependency to linear. We show that BigBird is a universal approximator of sequence functions and is Turing complete, thereby preserving these properties of the quadratic, full attention model. Along the way, our theoretical analysis reveals some of the benefits of having O(1) global tokens (such as CLS) that attend to the entire sequence as part of the sparse attention mechanism. The proposed sparse attention can handle sequences of length up to 8x of what was previously possible using similar hardware. As a consequence of the capability to handle longer context, BigBird drastically improves performance on various NLP tasks such as question answering and summarization. We also propose novel applications to genomics data.*

## Usage tips

- For an in-depth explanation of how BigBird's attention works, see [this blog post](https://huggingface.co/blog/big-bird).
- BigBird comes with two implementations: **original_full** and **block_sparse**. For sequence lengths < 1024, using **original_full** is advised, as there is no benefit in using **block_sparse** attention.
- The code currently uses a window size of 3 blocks and 2 global blocks.
- The sequence length must be divisible by the block size.
- The current implementation supports **ITC** only.
- The current implementation doesn't support **num_random_blocks = 0**.
- BigBirdPegasus uses the [PegasusTokenizer](https://github.com/huggingface/transformers/blob/main/src/transformers/models/pegasus/tokenization_pegasus.py).
- BigBird is a model with absolute position embeddings, so it is usually advised to pad the inputs on the right rather than the left.

The original code can be found [here](https://github.com/google-research/bigbird).

## Documentation resources

- [Text classification task guide](../tasks/sequence_classification)
- [Question answering task guide](../tasks/question_answering)
- [Causal language modeling task guide](../tasks/language_modeling)
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)

## BigBirdPegasusConfig

[[autodoc]] BigBirdPegasusConfig
    - all

## BigBirdPegasusModel

[[autodoc]] BigBirdPegasusModel
    - forward

## BigBirdPegasusForConditionalGeneration

[[autodoc]] BigBirdPegasusForConditionalGeneration
    - forward

## BigBirdPegasusForSequenceClassification

[[autodoc]] BigBirdPegasusForSequenceClassification
    - forward

## BigBirdPegasusForQuestionAnswering

[[autodoc]] BigBirdPegasusForQuestionAnswering
    - forward

## BigBirdPegasusForCausalLM

[[autodoc]] BigBirdPegasusForCausalLM
    - forward
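To make the **original_full** vs. **block_sparse** tip concrete, here is a minimal summarization sketch. `google/bigbird-pegasus-large-arxiv` is a public Hub checkpoint, and switching `attention_type` at load time is supported by the BigBird implementation; the short placeholder input is only for illustration:

```python
from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
# For inputs shorter than 1024 tokens, full attention is recommended over block sparse
model = BigBirdPegasusForConditionalGeneration.from_pretrained(
    "google/bigbird-pegasus-large-arxiv", attention_type="original_full"
)

text = "Replace this with a long scientific article to summarize."
inputs = tokenizer(text, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```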
transformers/docs/source/ja/model_doc/bigbird_pegasus.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bigbird_pegasus.md", "repo_id": "transformers", "token_count": 2264 }
295
<!--Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# CLIP

## Overview

The CLIP model was proposed in [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. CLIP (Contrastive Language-Image Pre-Training) is a neural network trained on a variety of (image, text) pairs. It can be instructed in natural language to predict the most relevant text snippet given an image, without directly optimizing for the task, similarly to the zero-shot capabilities of GPT-2 and 3.

The abstract from the paper is the following:

*State-of-the-art computer vision systems are trained to predict a fixed set of predetermined object categories. This restricted form of supervision limits their generality and usability since additional labeled data is needed to specify any other visual concept. Learning directly from raw text about images is a promising alternative which leverages a much broader source of supervision. We demonstrate that the simple pre-training task of predicting which caption goes with which image is an efficient and scalable way to learn SOTA image representations from scratch on a dataset of 400 million (image, text) pairs collected from the internet. After pre-training, natural language is used to reference learned visual concepts (or describe new ones) enabling zero-shot transfer of the model to downstream tasks. We study the performance of this approach by benchmarking on over 30 different existing computer vision datasets, spanning tasks such as OCR, action recognition in videos, geo-localization, and many types of fine-grained object classification. The model transfers non-trivially to most tasks and is often competitive with a fully supervised baseline without the need for any dataset specific training. For instance, we match the accuracy of the original ResNet-50 on ImageNet zero-shot without needing to use any of the 1.28 million training examples it was trained on. We release our code and pre-trained model weights at this https URL.*

This model was contributed by [valhalla](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/openai/CLIP).

## Usage tips and example

CLIP is a multi-modal vision and language model. It can be used for image-text similarity and for zero-shot image classification. CLIP uses a ViT-like transformer to get visual features and a causal language model to get the text features. Both the text and visual features are then projected into a latent space of identical dimension. The dot product between the projected image and text features is then used as a similarity score.

To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as the representation of the entire image. The authors also add absolute position embeddings and feed the resulting sequence of vectors to a standard Transformer encoder.

The [`CLIPImageProcessor`] can be used to resize (or rescale) and normalize images for the model. The [`CLIPTokenizer`] is used to encode the text. The [`CLIPProcessor`] wraps [`CLIPImageProcessor`] and [`CLIPTokenizer`] into a single instance to both encode the text and prepare the images. The following example shows how to get the image-text similarity scores using [`CLIPProcessor`] and [`CLIPModel`].

```python
>>> from PIL import Image
>>> import requests

>>> from transformers import CLIPProcessor, CLIPModel

>>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)

>>> outputs = model(**inputs)
>>> logits_per_image = outputs.logits_per_image  # this is the image-text similarity score
>>> probs = logits_per_image.softmax(dim=1)  # we can take the softmax to get the label probabilities
```

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CLIP.

- [Fine tuning CLIP with Remote Sensing (Satellite) images and captions](https://huggingface.co/blog/fine-tune-clip-rsicd), a blog post about how to fine-tune CLIP with the [RSICD dataset](https://github.com/201528014227051/RSICD_optimal) and a comparison of performance changes due to data augmentation.
- This [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/contrastive-image-text) shows how to train a CLIP-like vision-text dual encoder model using pre-trained vision and text encoders on the [COCO dataset](https://cocodataset.org/#home).

<PipelineTag pipeline="image-to-text"/>

- A [notebook](https://colab.research.google.com/drive/1tuoAC5F4sC7qid56Z0ap-stR3rwdk0ZV?usp=sharing) on how to use a pretrained CLIP for inference with beam search for image captioning. 🌎

**Image retrieval**

- A [notebook](https://colab.research.google.com/drive/1bLVwVKpAndpEDHqjzxVPr_9nGrSbuOQd?usp=sharing) on image retrieval using pretrained CLIP and computing MRR (Mean Reciprocal Rank) score. 🌎
- A [notebook](https://colab.research.google.com/github/deep-diver/image_search_with_natural_language/blob/main/notebooks/Image_Search_CLIP.ipynb) on image retrieval and showing the similarity score. 🌎
- A [notebook](https://colab.research.google.com/drive/1xO-wC_m_GNzgjIBQ4a4znvQkvDoZJvH4?usp=sharing) on how to map images and texts to the same vector space using Multilingual CLIP. 🌎
- A [notebook](https://colab.research.google.com/github/vivien000/clip-demo/blob/master/clip.ipynb#scrollTo=uzdFhRGqiWkR) on how to run CLIP on semantic image search using the [Unsplash](https://unsplash.com) and [TMDB](https://www.themoviedb.org/) datasets. 🌎

**Explainability**

- A [notebook](https://colab.research.google.com/github/hila-chefer/Transformer-MM-Explainability/blob/main/CLIP_explainability.ipynb) on how to visualize similarity between an input token and image segments. 🌎

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it. The resource should ideally demonstrate something new instead of duplicating an existing resource.

## CLIPConfig

[[autodoc]] CLIPConfig
    - from_text_vision_configs

## CLIPTextConfig

[[autodoc]] CLIPTextConfig

## CLIPVisionConfig

[[autodoc]] CLIPVisionConfig

## CLIPTokenizer

[[autodoc]] CLIPTokenizer
    - build_inputs_with_special_tokens
    - get_special_tokens_mask
    - create_token_type_ids_from_sequences
    - save_vocabulary

## CLIPTokenizerFast

[[autodoc]] CLIPTokenizerFast

## CLIPImageProcessor

[[autodoc]] CLIPImageProcessor
    - preprocess

## CLIPFeatureExtractor

[[autodoc]] CLIPFeatureExtractor

## CLIPProcessor

[[autodoc]] CLIPProcessor

<frameworkcontent>
<pt>

## CLIPModel

[[autodoc]] CLIPModel
    - forward
    - get_text_features
    - get_image_features

## CLIPTextModel

[[autodoc]] CLIPTextModel
    - forward

## CLIPTextModelWithProjection

[[autodoc]] CLIPTextModelWithProjection
    - forward

## CLIPVisionModelWithProjection

[[autodoc]] CLIPVisionModelWithProjection
    - forward

## CLIPVisionModel

[[autodoc]] CLIPVisionModel
    - forward

</pt>
<tf>

## TFCLIPModel

[[autodoc]] TFCLIPModel
    - call
    - get_text_features
    - get_image_features

## TFCLIPTextModel

[[autodoc]] TFCLIPTextModel
    - call

## TFCLIPVisionModel

[[autodoc]] TFCLIPVisionModel
    - call

</tf>
<jax>

## FlaxCLIPModel

[[autodoc]] FlaxCLIPModel
    - __call__
    - get_text_features
    - get_image_features

## FlaxCLIPTextModel

[[autodoc]] FlaxCLIPTextModel
    - __call__

## FlaxCLIPTextModelWithProjection

[[autodoc]] FlaxCLIPTextModelWithProjection
    - __call__

## FlaxCLIPVisionModel

[[autodoc]] FlaxCLIPVisionModel
    - __call__

</jax>
</frameworkcontent>
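Beyond the processor/model example above, zero-shot image classification, the task CLIP is best known for, is also exposed through a pipeline. A minimal sketch using the same public checkpoint and COCO image:

```python
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
# Candidate labels are free-form text; CLIP scores each one against the image
predictions = classifier(url, candidate_labels=["a photo of a cat", "a photo of a dog"])
print(predictions)
```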
transformers/docs/source/ja/model_doc/clip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clip.md", "repo_id": "transformers", "token_count": 4545 }
296
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Decision Transformer

## Overview

The Decision Transformer model was proposed in [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, and Igor Mordatch.

The abstract from the paper is the following:

*We introduce a framework that abstracts Reinforcement Learning (RL) as a sequence modeling problem. This allows us to draw upon the simplicity and scalability of the Transformer architecture, and associated advances in language modeling such as GPT-x and BERT. In particular, we present Decision Transformer, an architecture that casts the problem of RL as conditional sequence modeling. Unlike prior approaches to RL that fit value functions or compute policy gradients, Decision Transformer simply outputs the optimal actions by leveraging a causally masked Transformer. By conditioning an autoregressive model on the desired return (reward), past states, and actions, our Decision Transformer model can generate future actions that achieve the desired return. Despite its simplicity, Decision Transformer matches or exceeds the performance of state-of-the-art model-free offline RL baselines on Atari, OpenAI Gym, and Key-to-Door tasks.*

This version of the model is for tasks where the state is a vector.

This model was contributed by [edbeeching](https://huggingface.co/edbeeching). The original code can be found [here](https://github.com/kzl/decision-transformer).

## DecisionTransformerConfig

[[autodoc]] DecisionTransformerConfig

## DecisionTransformerGPT2Model

[[autodoc]] DecisionTransformerGPT2Model
    - forward

## DecisionTransformerModel

[[autodoc]] DecisionTransformerModel
    - forward
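A minimal sketch of a forward pass, using randomly initialized weights and hypothetical state/action dimensions (17 and 6 happen to match the Gym Hopper environment, but any values work for illustration):

```python
import torch
from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=6)  # hypothetical dims
model = DecisionTransformerModel(config)
model.eval()

batch_size, seq_len = 1, 20
states = torch.randn(batch_size, seq_len, config.state_dim)
actions = torch.randn(batch_size, seq_len, config.act_dim)
rewards = torch.randn(batch_size, seq_len, 1)
returns_to_go = torch.randn(batch_size, seq_len, 1)  # desired return conditioning
timesteps = torch.arange(seq_len).unsqueeze(0)
attention_mask = torch.ones(batch_size, seq_len)

with torch.no_grad():
    outputs = model(
        states=states,
        actions=actions,
        rewards=rewards,
        returns_to_go=returns_to_go,
        timesteps=timesteps,
        attention_mask=attention_mask,
    )
print(outputs.action_preds.shape)  # (batch, seq_len, act_dim)
```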
transformers/docs/source/ja/model_doc/decision_transformer.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/decision_transformer.md", "repo_id": "transformers", "token_count": 1073 }
297
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Efficient Inference on Multiple GPUs

This document contains information on how to run inference efficiently on multiple GPUs.

<Tip>

Note: A multi-GPU setup can use the majority of the strategies described in the [single GPU section](./perf_infer_gpu_one). You should, however, be aware of a few simple techniques that can be used for better usage.

</Tip>

## Flash Attention 2

The Flash Attention 2 integration also works in a multi-GPU setup; check out the appropriate section in the [single GPU section](./perf_infer_gpu_one#Flash-Attention-2) for details.

## BetterTransformer

[BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview) converts 🤗 Transformers models to use the PyTorch-native fastpath execution, which calls optimized kernels like Flash Attention under the hood.

BetterTransformer is also supported for faster inference on single and multiple GPUs for text, image, and audio models.

<Tip>

Flash Attention can only be used for models using the fp16 or bf16 dtype. Make sure to cast your model to the appropriate dtype before using BetterTransformer.

</Tip>

### Decoder models

For text models, especially decoder-based models (GPT, T5, Llama, etc.), the BetterTransformer API converts all attention operations to use the [`torch.nn.functional.scaled_dot_product_attention` operator](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) (SDPA), which is only available in PyTorch 2.0 and onwards.

To convert a model to BetterTransformer:

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
# convert the model to BetterTransformer
model.to_bettertransformer()

# Use it for training or inference
```

SDPA can also call [Flash Attention](https://arxiv.org/abs/2205.14135) kernels under the hood for specific settings such as hardware and problem size. To enable Flash Attention, or to check whether it is available in a given setting (hardware, problem size), use [`torch.backends.cuda.sdp_kernel`](https://pytorch.org/docs/master/backends.html#torch.backends.cuda.sdp_kernel) as a context manager:

```diff
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m").to("cuda")
# convert the model to BetterTransformer
model.to_bettertransformer()

input_text = "Hello my dog is cute and"
inputs = tokenizer(input_text, return_tensors="pt").to("cuda")

+ with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    outputs = model.generate(**inputs)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

If you see a bug with a traceback saying:

```bash
RuntimeError: No available kernel. Aborting execution.
```

try using the PyTorch nightly version, which may have broader coverage for Flash Attention:

```bash
pip3 install -U --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cu118
```

Check out [this blog post](https://pytorch.org/blog/out-of-the-box-acceleration/) to learn more about what is possible with the BetterTransformer + SDPA API.

### Encoder models

For encoder models during inference, BetterTransformer dispatches the forward call of encoder layers to an equivalent of [`torch.nn.TransformerEncoderLayer`](https://pytorch.org/docs/stable/generated/torch.nn.TransformerEncoderLayer.html) that will execute the fastpath implementation of the encoder layers.

Because the `torch.nn.TransformerEncoderLayer` fastpath does not support training, it is dispatched to `torch.nn.functional.scaled_dot_product_attention` instead, which does not leverage nested tensors but can use Flash Attention or Memory-Efficient Attention fused kernels.

More details about BetterTransformer performance can be found in this [blog post](https://medium.com/pytorch/bettertransformer-out-of-the-box-performance-for-huggingface-transformers-3fbe27d50ab2), and you can learn more about BetterTransformer for encoder models in this [blog](https://pytorch.org/blog/a-better-transformer-for-fast-transformer-encoder-inference/).

## Advanced usage: mixing FP4 (or Int8) and BetterTransformer

You can combine the different methods described above to get the best performance for your model. For example, you can use BetterTransformer with FP4 mixed-precision inference + Flash Attention:

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16
)

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=quantization_config)

input_text = "Hello my dog is cute and"
inputs = tokenizer(input_text, return_tensors="pt").to("cuda")

with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    outputs = model.generate(**inputs)

print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
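One multi-GPU-specific trick the examples above do not show is sharding a model across all available GPUs with `device_map="auto"`. This is a minimal sketch; it requires the `accelerate` package to be installed, and the checkpoint and dtype below are just examples:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
# Layers are placed automatically across all visible GPUs (and CPU, if needed)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",
    device_map="auto",
    torch_dtype=torch.float16,
)

# Inputs go to the device holding the first model shard
inputs = tokenizer("Hello my dog is cute and", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```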
transformers/docs/source/ja/perf_infer_gpu_many.md/0
{ "file_path": "transformers/docs/source/ja/perf_infer_gpu_many.md", "repo_id": "transformers", "token_count": 2561 }
298
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Checks on a Pull Request

When you open a pull request on 🤗 Transformers, a fair number of checks are run to make sure the patch you are adding does not break anything existing. Those checks are of four types:
- regular tests
- documentation build
- code and documentation style
- general repository consistency

In this document, we will explain what those various checks are and the reason behind them, as well as how to debug them locally if one of them fails on your PR.

Note that, ideally, they require you to have a dev install:

```bash
pip install transformers[dev]
```

or for an editable install:

```bash
pip install -e .[dev]
```

inside the Transformers repo. Since the number of optional dependencies of Transformers has grown a lot, it is possible you do not manage to get all of them. If the dev install fails, make sure to install the deep learning framework you are working with (PyTorch, TensorFlow and/or Flax), then do:

```bash
pip install transformers[quality]
```

or for an editable install:

```bash
pip install -e .[quality]
```

## Tests

All the jobs that begin with `ci/circleci: run_tests_` run parts of the Transformers testing suite. Each of those jobs focuses on a part of the library in a certain environment: for instance, `ci/circleci: run_tests_pipelines_tf` runs the pipelines tests in an environment where only TensorFlow is installed.

Note that only part of the test suite is run each time: a utility determines the differences in the library between before and after the PR and picks the tests impacted by that diff. That utility can be run locally with:

```bash
python utils/tests_fetcher.py
```

from the root of the repo. It will:

1. Check for each file in the diff whether the changes are in the code or only in comments or docstrings. Only the files with real code changes are kept.
2. Build an internal map that gives, for each file of the source code of the library, all the files it recursively impacts. Module A is said to impact module B if module B imports module A. For the recursive impact, we need a chain of modules going from module A to module B in which each module imports the previous one.
3. Apply this map to the files gathered in step 1, which gives the list of model files impacted by the PR.
4. Map each of those files to their corresponding test file(s) and get the list of tests to run.
When executing the script locally, you will get the results of steps 1, 3 and 4 printed, so you know which tests are run. The script also creates a file named `test_list.txt`, which contains the list of tests to run, and you can run them locally with the following command:

```bash
python -m pytest -n 8 --dist=loadfile -rA -s $(cat test_list.txt)
```

## Documentation build

The `build_pr_documentation` job builds the documentation and makes sure everything will display correctly once your PR is merged. A bot adds a link to preview the documentation to your PR; any changes you make to the PR are automatically reflected in the preview. If the documentation fails to build, click on **Details** next to the failed job to see what went wrong. Often, the issue is as simple as a missing file in the `toctree`.

If you are interested in building or previewing the documentation locally, take a look at the [`README.md`](https://github.com/huggingface/transformers/tree/main/docs) in the docs folder.

## Code and documentation style

Code formatting is applied to all the source files, the examples and the tests using `black` and `ruff`. We also have custom tools taking care of the formatting of docstrings and `rst` files, as well as the order of the lazy imports performed in the Transformers `__init__.py` files (`utils/style_doc.py` and `utils/custom_init_isort.py`). All of this can be launched by executing:

```bash
make style
```

The CI checks those have been applied inside the `ci/circleci: check_code_quality` check. It also runs `ruff`, which will complain if it finds an undefined variable, or one that is not used. To run that check locally, use:

```bash
make quality
```

This can take a while, so to run the same thing on only the files you modified in the current branch, run:

```bash
make fixup
```

This last command also runs all the additional checks for the repository consistency. Let's have a look at them.

## Repository consistency

This regroups all the tests to make sure your PR leaves the repository in a good state, and is performed by the `ci/circleci: check_repository_consistency` check. You can locally run that check by executing the following:

```bash
make repo-consistency
```

This checks that:

- All objects added to the init are documented (performed by `utils/check_repo.py`)
- All `__init__.py` files have the same content in their two sections (performed by `utils/check_inits.py`)
- All code identified as a copy from another module is consistent with the original (performed by `utils/check_copies.py`)
- All configuration classes have at least one valid checkpoint mentioned in their docstrings (performed by `utils/check_config_docstrings.py`)
- All configuration classes only contain attributes that are used in the corresponding modeling files (performed by `utils/check_config_attributes.py`)
- The translations of the READMEs and the index of the doc have the same model list as the main README (performed by `utils/check_copies.py`)
- The auto-generated tables in the documentation are up to date (performed by `utils/check_table.py`)
- The library has all objects available even if not all optional dependencies are installed (performed by `utils/check_dummies.py`)

Should this check fail, the first two items require manual fixing, while the last four can be fixed automatically by running the command:

```bash
make fix-copies
```

Additional checks concern PRs that add new models, mainly that:

- All models added are in an Auto-mapping (performed by `utils/check_repo.py`)
<!-- TODO Sylvain, add a check that makes sure the common tests are implemented.-->
- All models are properly tested (performed by `utils/check_repo.py`)
<!-- TODO Sylvain, add the following
- All models are added to the main README, inside the main doc
- All checkpoints used actually exist on the Hub -->

### Check copies

Since the Transformers library is very opinionated with respect to model code, and each model should be fully implemented in a single file without relying on other models, we have added a mechanism that checks whether a copy of the code of a given model's layer stays consistent with the original. This way, when there is a bug fix, we can see all the other impacted models and choose to trickle down the modification or break the copy.

<Tip>

If a file is a full copy of another file, you should register it in the constant `FULL_COPIES` of `utils/check_copies.py`.

</Tip>

This mechanism relies on comments of the form `# Copied from xxx`, where `xxx` should contain the whole path to the class or function being copied. For instance, `RobertaSelfOutput` is a direct copy of the `BertSelfOutput` class, so you can see [here](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L289) that it has a comment:

```py
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
```

Note that, instead of applying this to a whole class, you can apply it to the relevant methods being copied. A copy can also rename things on the fly via a replacement pattern: for instance, [here](https://github.com/huggingface/transformers/blob/2bd7a27a671fd1d98059124024f580f8f5c0f3b5/src/transformers/models/roberta/modeling_roberta.py#L598) `RobertaAttention` is copied from `BertAttention` with a `Bert->Roberta` replacement, indicated by the comment:

```py
Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Roberta
```

注:矢印の周りにはスペースが含まれていてはいけません(もちろん、そのスペースが置換パターンの一部である場合を除きます)。

カンマで区切られた複数のパターンを追加できます。例えば、ここでは `CamembertForMaskedLM` は `RobertaForMaskedLM` の直接のコピーで、2つの置換があります: `Roberta` から `Camembert` へ、そして `ROBERTA` から `CAMEMBERT` へと置換されます。[こちら](https://github.com/huggingface/transformers/blob/15082a9dc6950ecae63a0d3e5060b2fc7f15050a/src/transformers/models/camembert/modeling_camembert.py#L929)で、この作業はコメント付きで行われています。

```py
# Copied from transformers.models.roberta.modeling_roberta.RobertaForMaskedLM with Roberta->Camembert, ROBERTA->CAMEMBERT
```

もし順序が重要な場合(以前の置換と競合する可能性があるため)は注意してください。置換は左から右に実行されます。

<Tip>

もし置換がフォーマットを変更する場合(たとえば、短い名前を非常に長い名前に置き換える場合など)、自動フォーマッタを適用した後にコピーが確認されます。

</Tip>

パターンが同じ置換の異なるケース(大文字と小文字のバリアントがある)の場合、オプションとして `all-casing` を追加するだけの別の方法もあります。[こちら](https://github.com/huggingface/transformers/blob/15082a9dc6950ecae63a0d3e5060b2fc7f15050a/src/transformers/models/mobilebert/modeling_mobilebert.py#L1237)は、`MobileBertForSequenceClassification` 内の例で、コメントがついています。

```py
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
```

この場合、コードは `BertForSequenceClassification` からコピーされ、次のように置換されます:

- `Bert` を `MobileBert` に置き換える(例:`init` で `MobileBertModel` を使用する場合)
- `bert` を `mobilebert` に置き換える(例:`self.mobilebert` を定義する場合)
- `BERT` を `MOBILEBERT` に置き換える(定数 `MOBILEBERT_INPUTS_DOCSTRING` 内で)
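参考までに、この `all-casing` 付きコメントを適用したコードが実際にどのような形になるかの簡略化したスケッチを以下に示します(置換の効果を説明するための抜粋であり、実際の `MobileBertForSequenceClassification` の完全な実装ではありません):

```py
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        # `Bert` -> `MobileBert` の置換によりクラス名に `MobileBertModel` が使われ、
        # `bert` -> `mobilebert` の置換により属性名が `self.mobilebert` になる
        self.mobilebert = MobileBertModel(config)
```

`utils/check_copies.py` は、コピー元のコードにこれらの置換を機械的に適用した結果と、このクラスの実際のコードが一致しているかどうかを検証します。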
transformers/docs/source/ja/pr_checks.md/0
{ "file_path": "transformers/docs/source/ja/pr_checks.md", "repo_id": "transformers", "token_count": 5982 }
299
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Monocular depth estimation

単眼奥行き推定は、単一の画像からシーンの奥行き情報を予測するコンピュータービジョンタスクです。言い換えれば、単一のカメラ視点からシーン内のオブジェクトまでの距離を推定するプロセスです。

単眼奥行き推定には、3D 再構築、拡張現実、自動運転、ロボット工学など、さまざまな用途があります。モデルがシーン内のオブジェクト間の複雑な関係と、それに対応する深度情報を理解する必要があるため、これは困難なタスクです。深度情報は、照明条件、オクルージョン、テクスチャなどの要因の影響を受ける可能性があります。

<Tip>

このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/depth-estimation) を確認することをお勧めします。

</Tip>

このガイドでは、次の方法を学びます。

* 深度推定パイプラインを作成する
* 手動で深度推定推論を実行する

始める前に、必要なライブラリがすべてインストールされていることを確認してください。

```bash
pip install -q transformers
```

## Depth estimation pipeline

深度推定をサポートするモデルで推論を試す最も簡単な方法は、対応する [`pipeline`] を使用することです。
[Hugging Face Hub のチェックポイント](https://huggingface.co/models?pipeline_tag=depth-estimation&sort=downloads) からパイプラインをインスタンス化します。

```py
>>> from transformers import pipeline

>>> checkpoint = "vinvino02/glpn-nyu"
>>> depth_estimator = pipeline("depth-estimation", model=checkpoint)
```

次に、分析する画像を選択します。

```py
>>> from PIL import Image
>>> import requests

>>> url = "https://unsplash.com/photos/HwBAsSbPBDU/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MzR8fGNhciUyMGluJTIwdGhlJTIwc3RyZWV0fGVufDB8MHx8fDE2Nzg5MDEwODg&force=true&w=640"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-estimation-example.jpg" alt="Photo of a busy street"/>
</div>

画像をパイプラインに渡します。

```py
>>> predictions = depth_estimator(image)
```

パイプラインは 2 つのエントリを含む辞書を返します。最初のものは `predicted_depth` と呼ばれ、各ピクセルの深度をメートル単位で表した値を持つテンソルです。
2 番目の `depth` は、深度推定結果を視覚化する PIL 画像です。

視覚化された結果を見てみましょう。

```py
>>> predictions["depth"]
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-visualization.png" alt="Depth estimation visualization"/>
</div>

## Depth estimation inference by hand

深度推定パイプラインの使用方法を理解したので、同じ結果を手動で複製する方法を見てみましょう。

まず、[Hugging Face Hub のチェックポイント](https://huggingface.co/models?pipeline_tag=depth-estimation&sort=downloads) からモデルと関連プロセッサをロードします。
ここでは、前と同じチェックポイントを使用します。

```py
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation

>>> checkpoint = "vinvino02/glpn-nyu"

>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
>>> model = AutoModelForDepthEstimation.from_pretrained(checkpoint)
```

サイズ変更や正規化など、必要な画像変換を処理する `image_processor` を使用して、モデルへの画像入力を準備します:

```py
>>> pixel_values = image_processor(image, return_tensors="pt").pixel_values
```

準備された入力をモデルに渡します。

```py
>>> import torch

>>> with torch.no_grad():
...     outputs = model(pixel_values)
...     
predicted_depth = outputs.predicted_depth ``` 結果を視覚化します。 ```py >>> import numpy as np >>> # interpolate to original size >>> prediction = torch.nn.functional.interpolate( ... predicted_depth.unsqueeze(1), ... size=image.size[::-1], ... mode="bicubic", ... align_corners=False, ... ).squeeze() >>> output = prediction.numpy() >>> formatted = (output * 255 / np.max(output)).astype("uint8") >>> depth = Image.fromarray(formatted) >>> depth ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-visualization.png" alt="Depth estimation visualization"/> </div>
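補足として、グレースケールの深度マップをカラーマップ付きで可視化したい場合は、たとえば次のように `matplotlib` を使用できます(これは上記のガイド本体には含まれない任意の追加例で、`output` は前のステップで得た NumPy 配列です):

```py
>>> import matplotlib.pyplot as plt

>>> plt.imshow(output, cmap="plasma")  # 深度値をカラーマップで表示(cmap は好みで変更可能)
>>> plt.colorbar(label="depth")
>>> plt.axis("off")
>>> plt.savefig("depth_colored.png", bbox_inches="tight")
```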
transformers/docs/source/ja/tasks/monocular_depth_estimation.md/0
{ "file_path": "transformers/docs/source/ja/tasks/monocular_depth_estimation.md", "repo_id": "transformers", "token_count": 2264 }
300
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Testing 🤗 Transformersモデルがどのようにテストされ、新しいテストを書いて既存のテストを改善できるかを見てみましょう。 このリポジトリには2つのテストスイートがあります: 1. `tests` -- 一般的なAPI用のテスト 2. `examples` -- APIの一部ではないさまざまなアプリケーション用のテスト ## How transformers are tested 1. PRが提出されると、9つのCircleCiジョブでテストされます。PRへの新しいコミットごとに再テストされます。これらのジョブは、[この設定ファイル](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml)で定義されており、必要な場合は同じ環境を自分のマシンで再現できます。 これらのCIジョブは `@slow` テストを実行しません。 2. [GitHub Actions](https://github.com/huggingface/transformers/actions)によって実行される3つのジョブがあります: - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): torch hubの統合が動作するかどうかを確認します。 - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): `main` にコミットが行われた場合に、GPUで高速テストを実行します。このジョブは、`main` でのコミットが以下のフォルダーのコードを更新した場合にのみ実行されます:`src`、`tests`、`.github`(追加されたモデルカード、ノートブックなどの実行を防ぐため)。 - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): GPUで `tests` と `examples` の通常のテストと遅いテストを実行します。 ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` 結果は[here](https://github.com/huggingface/transformers/actions)で観察できます。 ## Running tests ### Choosing which tests to run このドキュメントは、テストを実行する方法の多くの詳細について説明しています。すべてを読んだ後でも、さらに詳細が必要な場合は、[こちら](https://docs.pytest.org/en/latest/usage.html)で見つけることができます。 以下は、テストを実行するためのいくつかの最も便利な方法です。 すべて実行します: ```console pytest ``` または: ```bash make test ``` 後者は次のように定義されることに注意してください。 ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` 以下は、pytestに渡す設定情報です。 - テストプロセスをCPUコアの数と同じだけ実行するように指示します。ただし、RAMが十分でない場合は注意が必要です。 - 同じファイルからのすべてのテストは、同じテストプロセスで実行されるようにします。 - 出力のキャプチャを行いません。 - 冗長モードで実行します。 ### Getting the list of all tests テストスイートのすべてのテスト: ```bash pytest --collect-only -q ``` 指定されたテスト ファイルのすべてのテスト: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### Run a specific test module 個別のテスト モジュールを実行するには: ```bash pytest tests/utils/test_logging.py ``` ### Run specific tests ほとんどのテストでunittestが使用されているため、特定のサブテストを実行するには、それらのテストを含むunittestクラスの名前を知っている必要があります。例えば、それは次のようになるかもしれません: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` テストの実行方法: テストファイル: `tests/test_optimization.py` クラス名: `OptimizationTest` テスト関数の名前: `test_adam_w` ファイルに複数のクラスが含まれている場合は、特定のクラスのテストのみを実行することを選択できます。例えば: ```bash pytest tests/test_optimization.py::OptimizationTest ``` テストクラス内のすべてのテストを実行します。 前述の通り、`OptimizationTest` クラスに含まれるテストを実行するには、次のコマンドを実行できます: ```bash pytest tests/test_optimization.py::OptimizationTest --collect-only -q ``` キーワード式を使用してテストを実行できます。 名前に `adam` が含まれるテストのみを実行するには: ```bash pytest -k adam tests/test_optimization.py ``` `and`および`or`は、すべてのキーワードが一致するか、いずれかを示すために使用できます。`not`は否定するために使用できます。 `adam`という名前を含むテストを除いてすべてのテストを実行するには: 
```bash
pytest -k "not adam" tests/test_optimization.py
```

また、2つのパターンを1つに組み合わせることもできます:

```bash
pytest -k "ada and not adam" tests/test_optimization.py
```

たとえば、`test_adafactor`と`test_adam_w`の両方を実行するには、以下のコマンドを使用できます:

```bash
pytest -k "test_adafactor or test_adam_w" tests/test_optimization.py
```

注意: ここでは、`or` を使用しています。キーワードのいずれか一つが一致すれば、両方を含めるためです。

両方のパターンを含むテストのみを含めたい場合は、`and` を使用してください。

```bash
pytest -k "test and ada" tests/test_optimization.py
```

### Run `accelerate` tests

時々、モデルに対して `accelerate` テストを実行する必要があります。たとえば、`OPT` モデルに対してこれらのテストを実行したい場合、コマンドに `-m accelerate_tests` を追加するだけで済みます:

```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```

### Run documentation tests

ドキュメンテーションの例が正しいかどうかをテストするには、`doctests` が合格しているかを確認する必要があります。
例として、[`WhisperModel.forward` のドックストリング](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035)を使用しましょう。

```python
r"""
Returns:

Example:
    ```python
    >>> import torch
    >>> from transformers import WhisperModel, WhisperFeatureExtractor
    >>> from datasets import load_dataset

    >>> model = WhisperModel.from_pretrained("openai/whisper-base")
    >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
    >>> input_features = inputs.input_features
    >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
    >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
    >>> list(last_hidden_state.shape)
    [1, 2, 512]
    ```"""
```

指定したファイル内のすべてのドックストリング例を自動的にテストするために、以下の行を実行してください:

```bash
pytest --doctest-modules <path_to_file_or_dir>
```

ファイルにマークダウン拡張子がある場合は、`--doctest-glob="*.md"`引数を追加する必要があります。

### Run only modified tests

[pytest-picked](https://github.com/anapaulagomes/pytest-picked)を使用すると、未ステージングのファイルまたは現在のブランチ(Gitに従って)に関連するテストを実行できます。これは、変更内容に関連するテストのみ実行されるため、変更が何も壊れていないことを迅速に確認する素晴らしい方法です。変更されていないファイルに関連するテストは実行されません。

```bash
pip install pytest-picked
```

```bash
pytest --picked
```

すべてのテストは、変更されたがまだコミットされていないファイルとフォルダから実行されます。

### Automatically rerun failed tests on source modification

[pytest-xdist](https://github.com/pytest-dev/pytest-xdist)は、非常に便利な機能を提供しており、すべての失敗したテストを検出し、ファイルを修正する間にそれらの失敗したテストを連続して再実行することができます。そのため、修正を行った後にpytestを再起動する必要がありません。すべてのテストが合格するまで繰り返され、その後再度フルランが実行されます。

```bash
pip install pytest-xdist
```

モードに入るには: `pytest -f`または`pytest --looponfail`

ファイルの変更は、`looponfailroots`ルートディレクトリとその内容全体(再帰的に)を見て検出されます。この値のデフォルトが機能しない場合、`setup.cfg`で設定オプションを変更してプロジェクト内で変更できます。

```ini
[tool:pytest]
looponfailroots = transformers tests
```

または `pytest.ini`/`tox.ini` ファイル:

```ini
[pytest]
looponfailroots = transformers tests
```

ファイルの変更を探すことは、iniファイルのディレクトリを基準にして指定されたディレクトリ内でのみ行われます。

[pytest-watch](https://github.com/joeyespo/pytest-watch) は、この機能の代替実装です。

### Skip a test module

特定のテストモジュールを除外してすべてのテストモジュールを実行したい場合、実行するテストの明示的なリストを指定することができます。例えば、`test_modeling_*.py` テストを除外してすべてを実行するには次のようにします:

```bash
pytest $(ls -1 tests/*py | grep -v test_modeling)
```

### Clearing state

CIビルドおよび速度に対する隔離が重要な場合(キャッシュに対して)、キャッシュをクリアする必要があります:

```bash
pytest --cache-clear tests
```

### Running tests in parallel

前述のように、`make test` は `pytest-xdist` プラグインを介してテストを並列実行します(`-n X` 引数、例: `-n 2` で2つの並列ジョブを実行)。

`pytest-xdist` の `--dist=` オプションを使用すると、テストがどのようにグループ化されるかを制御できます。`--dist=loadfile` は同じファイルにあるテストを同じプロセスに配置します。

テストの実行順序が異なり予測不可能であるため、`pytest-xdist` 
を使用してテストスイートを実行すると失敗が発生する場合(つまり、いくつかの未検出の連動テストがある場合)、[pytest-replay](https://github.com/ESSS/pytest-replay) を使用してテストを同じ順序で再生し、その後、失敗するシーケンスを最小限にするのに役立ちます。 ### Test order and repetition 潜在的な相互依存性や状態に関連するバグ(ティアダウン)を検出するために、テストを複数回、連続して、ランダムに、またはセットで繰り返すことは有用です。そして、単純な複数回の繰り返しは、DLのランダム性によって明らかになるいくつかの問題を検出するのに役立ちます。 #### Repeat tests - [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder): ```bash pip install pytest-flakefinder ``` そして、すべてのテストを複数回実行します (デフォルトでは 50 回)。 ```bash pytest --flake-finder --flake-runs=5 tests/test_failing_test.py ``` <Tip> このプラグインは、`pytest-xdist` の `-n` フラグでは動作しません。 </Tip> <Tip> 別のプラグイン `pytest-repeat` もありますが、これは `unittest` では動作しません。 </Tip> #### Run tests in a random order ```bash pip install pytest-random-order ``` 重要: `pytest-random-order` が存在すると、テストは自動的にランダム化されます。設定の変更や変更は必要ありません。 コマンドラインオプションは必須です。 前に説明したように、これにより、結合されたテスト (1 つのテストの状態が別のテストの状態に影響を与える) の検出が可能になります。いつ `pytest-random-order` がインストールされていると、そのセッションに使用されたランダム シードが出力されます。例: ```bash pytest tests [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` そのため、指定された特定のシーケンスが失敗した場合、その正確なシードを追加することでそれを再現できます。例: ```bash pytest --random-order-seed=573663 [...] Using --random-order-bucket=module Using --random-order-seed=573663 ``` 特定のテストのリストを使用しない場合、またはまったくリストを使用しない場合、同じテストの正確な順序を再現します。テストのリストを手動で絞り込み始めると、シードに依存せず、テストが失敗した正確な順序で手動でリストを指定する必要があります。これには、`--random-order-bucket=none` を使用してランダム化を無効にするようpytestに指示する必要があります。例えば、次のようにします: ```bash pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py ``` すべてのテストのシャッフルを無効にするには: ```bash pytest --random-order-bucket=none ``` デフォルトでは、`--random-order-bucket=module` が暗黙的に適用され、モジュールレベルでファイルをシャッフルします。また、`class`、`package`、`global`、および`none` レベルでシャッフルすることもできます。詳細については、その[ドキュメンテーション](https://github.com/jbasko/pytest-random-order)を参照してください。 別のランダム化の代替手段は、[`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly) です。このモジュールは非常に似た機能/インターフェースを持っていますが、`pytest-random-order` で利用可能なバケットモードを持っていません。インストール後に自動的に有効になるという同じ問題があります。 ### Look and feel variations #### pytest-sugar [pytest-sugar](https://github.com/Frozenball/pytest-sugar) は、外観と操作性を向上させ、プログレスバーを追加し、即座に失敗したテストとアサーションを表示するプラグインです。インストール後に自動的にアクティブ化されます。 ```bash pip install pytest-sugar ``` これを使用せずにテストを実行するには、次を実行します。 ```bash pytest -p no:sugar ``` またはアンインストールします。 #### Report each sub-test name and its progress `pytest` による単一またはグループのテストの場合 (`pip install pytest-pspec` の後): ```bash pytest --pspec tests/test_optimization.py ``` #### Instantly shows failed tests [pytest-instafail](https://github.com/pytest-dev/pytest-instafail) では、失敗とエラーが即座に表示されます。 テストセッションが終了するまで待機します。 ```bash pip install pytest-instafail ``` ```bash pytest --instafail ``` ### To GPU or not to GPU GPU が有効な設定で、CPU のみモードでテストするには、`CUDA_VISIBLE_DEVICES=""`を追加します。 ```bash CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py ``` または、複数の GPU がある場合は、`pytest` でどれを使用するかを指定できます。たとえば、 2 番目の GPU GPU `0` と `1` がある場合は、次を実行できます。 ```bash CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py ``` これは、異なるGPUで異なるタスクを実行したい場合に便利です。 一部のテストはCPUのみで実行する必要があり、他のテストはCPU、GPU、またはTPUで実行する必要があり、また別のテストは複数のGPUで実行する必要があります。次のスキップデコレーターは、テストのCPU/GPU/TPUに関する要件を設定するために使用されます: - `require_torch` - このテストはtorchの下でのみ実行されます。 - `require_torch_gpu` - `require_torch` に加えて、少なくとも1つのGPUが必要です。 - `require_torch_multi_gpu` - `require_torch` に加えて、少なくとも2つのGPUが必要です。 - `require_torch_non_multi_gpu` - `require_torch` に加えて、0または1つのGPUが必要です。 - `require_torch_up_to_2_gpus` - `require_torch` に加えて、0、1、または2つのGPUが必要です。 - `require_torch_xla` - 
`require_torch` に加えて、少なくとも1つのTPUが必要です。 以下の表にGPUの要件を示します: | n gpus | decorator | |--------+--------------------------------| | `>= 0` | `@require_torch` | | `>= 1` | `@require_torch_gpu` | | `>= 2` | `@require_torch_multi_gpu` | | `< 2` | `@require_torch_non_multi_gpu` | | `< 3` | `@require_torch_up_to_2_gpus` | たとえば、使用可能な GPU が 2 つ以上あり、pytorch がインストールされている場合にのみ実行する必要があるテストを次に示します。 ```python no-style @require_torch_multi_gpu def test_example_with_multi_gpu(): ``` テストに `tensorflow` が必要な場合は、`require_tf` デコレータを使用します。例えば: ```python no-style @require_tf def test_tf_thing_with_tensorflow(): ``` これらのデコレータは積み重ねることができます。たとえば、テストが遅く、pytorch で少なくとも 1 つの GPU が必要な場合は、次のようになります。 設定方法: ```python no-style @require_torch_gpu @slow def test_example_slow_on_gpu(): ``` `@parametrized` のような一部のデコレータはテスト名を書き換えるため、`@require_*` スキップ デコレータをリストする必要があります。 最後にそれらが正しく動作するようにします。正しい使用例は次のとおりです ```python no-style @parameterized.expand(...) @require_torch_multi_gpu def test_integration_foo(): ``` この順序の問題は `@pytest.mark.parametrize` には存在しません。最初または最後に配置しても、それでも問題は解決されます。 仕事。ただし、それは非単体テストでのみ機能します。 内部テスト: - 利用可能な GPU の数: ```python from transformers.testing_utils import get_gpu_count n_gpu = get_gpu_count() # works with torch and tf ``` ### Testing with a specific PyTorch backend or device 特定のtorchデバイスでテストスイートを実行するには、`TRANSFORMERS_TEST_DEVICE="$device"` を追加します。ここで `$device` は対象のバックエンドです。例えば、CPUでテストするには以下のようにします: ```bash TRANSFORMERS_TEST_DEVICE="cpu" pytest tests/utils/test_logging.py ``` この変数は、`mps`などのカスタムまたはあまり一般的ではない PyTorch バックエンドをテストするのに役立ちます。また、特定の GPU をターゲットにしたり、CPU 専用モードでテストしたりすることで、`CUDA_VISIBLE_DEVICES`と同じ効果を達成するために使用することもできます。 特定のデバイスでは、初めて「torch」をインポートした後、追加のインポートが必要になります。これは、環境変数 `TRANSFORMERS_TEST_BACKEND` を使用して指定できます。 ```bash TRANSFORMERS_TEST_BACKEND="torch_npu" pytest tests/utils/test_logging.py ``` ### Distributed training `pytest` は直接的に分散トレーニングを処理することはできません。試みると、サブプロセスは正しい処理を行わず、自分自身が `pytest` であると思い込んでテストスイートをループで実行し続けます。ただし、通常のプロセスを生成し、それから複数のワーカーを生成し、IOパイプを管理するプロセスを生成すれば機能します。 これを使用するいくつかのテストがあります: - [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py) - [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py) 実行ポイントにすぐに移動するには、これらのテスト内で `execute_subprocess_async` 呼び出しを検索してください。 これらのテストを実行するには、少なくとも2つのGPUが必要です: ```bash CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py ``` ### Output capture テストの実行中に、`stdout` および `stderr` に送信された出力はキャプチャされます。テストまたはセットアップメソッドが失敗した場合、通常、それに対応するキャプチャされた出力が失敗のトレースバックと共に表示されます。 出力のキャプチャを無効にし、`stdout` と `stderr` を通常通りに取得するには、`-s` または `--capture=no` を使用してください: これらのテストを実行するには少なくとも2つのGPUが必要です: ```bash pytest -s tests/utils/test_logging.py ``` テスト結果を JUnit 形式の出力に送信するには: ```bash py.test tests --junitxml=result.xml ``` ### Color control 色を持たないようにする(例:黄色のテキストを白い背景に表示すると読みにくいです): ```bash pytest --color=no tests/utils/test_logging.py ``` ### Sending test report to online pastebin service テスト失敗ごとに URL を作成します。 ```bash pytest --pastebin=failed tests/utils/test_logging.py ``` これにより、テスト実行情報がリモートのPasteサービスに送信され、各エラーに対してURLが提供されます。通常通りテストを選択するか、たとえば特定のエラーのみを送信したい場合は `-x` を追加で指定できます。 テストセッション全体のログに対するURLを作成する方法: ```bash pytest --pastebin=all tests/utils/test_logging.py ``` ## Writing tests 🤗 transformersのテストは `unittest` を基にしていますが、 `pytest` で実行されるため、ほとんどの場合、両方のシステムの機能を使用できます。 [こちら](https://docs.pytest.org/en/stable/unittest.html)でサポートされている機能を読むことができますが、重要なことは、ほとんどの `pytest` のフィクスチャが動作しないことです。パラメータ化も同様ですが、似たような方法で動作する `parameterized` 
モジュールを使用しています。 ### Parametrization 同じテストを異なる引数で複数回実行する必要があることがよくあります。これはテスト内部から行うこともできますが、その場合、そのテストを単一の引数セットで実行する方法はありません。 ```python # test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected) ``` デフォルトでは、このテストは3回実行され、それぞれの実行で `test_floor` の最後の3つの引数がパラメータリストの対応する引数に割り当てられます。 そして、`negative` と `integer` パラメータのセットのみを実行することもできます: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` または、`Negative`のサブテストを除くすべての場合、次のようになります。 ```bash pytest -k "not negative" tests/test_mytest.py ``` `-k` フィルターを使用することに加えて、各サブテストの正確な名前を調べ、その正確な名前を使用して任意のサブテストまたはすべてのサブテストを実行することができます。 ```bash pytest test_this1.py --collect-only -q ``` すると次のものがリストされます: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` したがって、2 つの特定のサブテストのみを実行できるようになりました。 ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` `transformers`の開発者依存関係にすでに含まれているモジュール[parameterized](https://pypi.org/project/parameterized/) は、`unittests` と `pytest` テストの両方で機能します。 ただし、テストが `unittest` でない場合、`pytest.mark.parametrize` を使用することができます(または既存のテストのいくつかで、主に `examples` の下で使用されているのを見ることができます)。 次に、同じ例を示しますが、今度は `pytest` の `parametrize` マーカーを使用しています: ```python # test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected) ``` `parameterized` と同様に、`pytest.mark.parametrize` を使用すると、`-k` フィルタが役立たない場合でも、サブテストの実行を細かく制御できます。ただし、このパラメータ化関数はサブテストの名前をわずかに異なるものにします。以下にその例を示します: ```bash pytest test_this2.py --collect-only -q ``` すると次のものがリストされます: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` これで、特定のテストのみを実行できるようになりました。 ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` 前の例と同様に。 ### Files and directories テストの中で、現在のテストファイルからの相対位置を知る必要があることがよくあります。しかし、これは簡単なことではありません。なぜなら、テストは複数のディレクトリから呼び出されるか、異なる深さのサブディレクトリに存在することがあるからです。`transformers.test_utils.TestCasePlus` というヘルパークラスは、すべての基本パスを整理し、簡単にアクセスできるようにすることで、この問題を解決します。 - `pathlib` オブジェクト(すべて完全に解決されたもの): - `test_file_path` - 現在のテストファイルのパス、つまり `__file__` - `test_file_dir` - 現在のテストファイルを含むディレクトリ - `tests_dir` - `tests` テストスイートのディレクトリ - `examples_dir` - `examples` テストスイートのディレクトリ - `repo_root_dir` - リポジトリのディレクトリ - `src_dir` - `transformers` サブディレクトリが存在する場所 - パスの文字列表現――上記と同じですが、これらは `pathlib` オブジェクトではなく文字列としてパスを返します: - `test_file_path_str` - `test_file_dir_str` - `tests_dir_str` - `examples_dir_str` - `repo_root_dir_str` - `src_dir_str` これらを使用し始めるには、テストが `transformers.test_utils.TestCasePlus` のサブクラスに存在することを確認するだけです。例: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def test_something_involving_local_locations(self): data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro" ``` もし、`pathlib` を介してパスを操作する必要がない場合、または単に文字列としてパスが必要な場合は、`pathlib` オブジェクトに `str()` を呼び出すか、`_str` で終わるアクセサを使用できます。例: ```python from transformers.testing_utils import TestCasePlus class PathExampleTest(TestCasePlus): def 
test_something_involving_stringified_locations(self): examples_dir = self.examples_dir_str ``` ### Temporary files and directories 一意の一時ファイルとディレクトリの使用は、並列テストの実行には欠かせません。これにより、テストがお互いのデータを上書きしないようにします。また、これらを作成した各テストの終了時に一時ファイルとディレクトリが削除されることを望みます。そのため、これらのニーズを満たすパッケージである `tempfile` のようなパッケージの使用は重要です。 しかし、テストのデバッグ時には、一時ファイルやディレクトリに何が格納されているかを確認できる必要があり、テストを再実行するたびにランダムに変更されないその正確なパスを知りたいと思います。 `transformers.test_utils.TestCasePlus` というヘルパークラスは、このような目的に最適です。これは `unittest.TestCase` のサブクラスであるため、テストモジュールで簡単に継承することができます。 以下はその使用例です: ```python from transformers.testing_utils import TestCasePlus class ExamplesTests(TestCasePlus): def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` このコードはユニークな一時ディレクトリを作成し、`tmp_dir` をその場所に設定します。 - ユニークな一時ディレクトリを作成します: ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir() ``` `tmp_dir` には、作成された一時ディレクトリへのパスが含まれます。期間終了後は自動的に削除されます テスト。 - 任意の一時ディレクトリを作成し、テストの開始前にそれが空であることを確認し、テスト後には空にしないでください。 ```python def test_whatever(self): tmp_dir = self.get_auto_remove_tmp_dir("./xxx") ``` これは、特定のディレクトリを監視し、前のテストがそこにデータを残さないことを確認したい場合に、デバッグに役立ちます。 - `before` と `after` 引数を直接オーバーライドすることで、デフォルトの動作をオーバーライドできます。以下のいずれかの動作に導きます: - `before=True`:テストの開始時に常に一時ディレクトリがクリアされます。 - `before=False`:一時ディレクトリが既に存在する場合、既存のファイルはそのままになります。 - `after=True`:テストの終了時に常に一時ディレクトリが削除されます。 - `after=False`:テストの終了時に常に一時ディレクトリはそのままになります。 <Tip> `rm -r`の相当を安全に実行するために、明示的な `tmp_dir` が使用される場合、プロジェクトリポジトリのチェックアウトのサブディレクトリのみが許可されます。誤って `/tmp` などのファイルシステムの重要な部分が削除されないように、常に `./` から始まるパスを渡してください。 </Tip> <Tip> 各テストは複数の一時ディレクトリを登録でき、要求がない限りすべて自動で削除されます。 </Tip> ### Temporary sys.path override 別のテストからインポートするために一時的に `sys.path` をオーバーライドする必要がある場合、`ExtendSysPath` コンテキストマネージャを使用できます。例: ```python import os from transformers.testing_utils import ExtendSysPath bindir = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f"{bindir}/.."): from test_trainer import TrainerIntegrationCommon # noqa ``` ### Skipping tests これは、バグが見つかり、新しいテストが作成された場合であっても、バグがまだ修正されていない場合に役立ちます。メインリポジトリにコミットできるようにするには、`make test` の実行中にそれをスキップする必要があります。 メソッド: - **skip** は、テストが特定の条件が満たされた場合にのみパスすることを期待しており、それ以外の場合は pytest がテストの実行をスキップします。一般的な例は、Windows専用のテストを非Windowsプラットフォームでスキップする場合、または現在利用できない外部リソースに依存するテストをスキップする場合です(例: データベースが利用できない場合)。 - **xfail** は、何らかの理由でテストが失敗することを期待しています。一般的な例は、まだ実装されていない機能のテストや、まだ修正されていないバグのテストです。テストが予想される失敗にもかかわらずパスした場合(pytest.mark.xfailでマークされたテスト)、それはxpassとしてテストサマリーに報告されます。 これらの2つの間の重要な違いの1つは、`skip` はテストを実行しない点であり、`xfail` は実行します。したがって、バグのあるコードが他のテストに影響を与える場合は、`xfail` を使用しないでください。 #### Implementation - テスト全体を無条件にスキップする方法は次のとおりです: ```python no-style @unittest.skip(reason="this bug needs to be fixed") def test_feature_x(): ``` または pytest 経由: ```python no-style @pytest.mark.skip(reason="this bug needs to be fixed") ``` または `xfail` の方法: ```python no-style @pytest.mark.xfail def test_feature_x(): ``` - テスト内の内部チェックに基づいてテストをスキップする方法は次のとおりです。 ```python def test_feature_x(): if not has_something(): pytest.skip("unsupported configuration") ``` またはモジュール全体: ```python import pytest if not pytest.config.getoption("--custom-flag"): pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True) ``` または `xfail` の方法: ```python def test_feature_x(): pytest.xfail("expected to fail until bug XYZ is fixed") ``` - 一部のインポートが欠落している場合にモジュール内のすべてのテストをスキップする方法は次のとおりです。 ```python docutils = pytest.importorskip("docutils", minversion="0.3") ``` - 条件に基づいてテストをスキップします。 ```python no-style @pytest.mark.skipif(sys.version_info < (3,6), reason="requires python3.6 or higher") def test_feature_x(): ``` 
または:

```python no-style
@unittest.skipIf(torch_device == "cpu", "Can't do half precision")
def test_feature_x():
```

またはモジュール全体をスキップします。

```python no-style
@pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows")
class TestClass():
    def test_feature_x(self):
```

詳細、例、および方法については[こちら](https://docs.pytest.org/en/latest/skipping.html)を参照してください。

### Slow tests

テストライブラリは着実に成長しており、テストの一部は数分かかります。そのため、CIでテストスイートの完了を1時間も待つ余裕はありません。したがって、いくつかの例外を除いて、遅いテストは以下の例のようにマークすべきです:

```python no-style
from transformers.testing_utils import slow
@slow
def test_integration_foo():
```

テストが`@slow`としてマークされたら、そのようなテストを実行するには、環境変数 `RUN_SLOW=1`を設定します。例:

```bash
RUN_SLOW=1 pytest tests
```

`@parameterized` のようなデコレータはテスト名を書き換えるため、`@slow` および他のスキップデコレータ `@require_*` は正しく動作するためには、最後にリストアップする必要があります。以下は正しい使用例の一例です:

```python no-style
@parameterized.expand(...)
@slow
def test_integration_foo():
```

このドキュメントの冒頭で説明したように、遅いテストは定期的なスケジュールに従って実行され、PRのCIチェックでは実行されません。そのため、一部の問題がPRの提出時に見落とされ、マージされる可能性があります。そのような問題は次回のスケジュールされたCIジョブで検出されます。しかし、それはまた、PRを提出する前に自分のマシンで遅いテストを実行する重要性を意味しています。

どのテストを遅いテストとしてマークすべきかを選択するための、おおまかな意思決定メカニズムが次に示されています:

- テストがライブラリの内部コンポーネントの1つに焦点を当てている場合(例: モデリングファイル、トークン化ファイル、パイプライン)、そのテストは通常の(遅くない)テストスイートで実行する必要があります。それがライブラリの他の側面、たとえばドキュメンテーションや例に焦点を当てている場合、それらのテストは遅いテストスイートで実行する必要があります。そして、このアプローチを洗練させるために例外を設ける必要があります。
- 重いウェイトセットや約50MB以上のデータセットをダウンロードする必要があるすべてのテスト(例: モデル統合テスト、トークナイザ統合テスト、パイプライン統合テスト)は遅いテストとして設定する必要があります。新しいモデルを追加する場合、統合テスト用にランダムなウェイトを持つ小さなバージョンを作成し、ハブにアップロードする必要があります。これについては以下の段落で詳しく説明します。
- 特に高速化されていないトレーニングを行う必要があるすべてのテストは遅いテストとして設定する必要があります。
- 一部の「遅い」であるべきでないテストが非常に遅い場合、およびそれらを `@slow` として設定する必要がある場合には例外を導入できます。大容量のファイルをディスクに保存および読み込みする自動モデリングテストは、`@slow` としてマークされたテストの良い例です。
- CIで1秒未満でテストが完了する場合(ダウンロードを含む)、それは通常のテストであるべきです。

すべての非遅いテストは、さまざまな内部要素を完全にカバーする必要がありますが、高速である必要があります。たとえば、特別に作成された小さなモデル(レイヤー数が最小限で、語彙サイズが小さいなど)を使用して、かなりのカバレッジを実現できます。その後、`@slow` テストでは大規模な遅いモデルを使用して質的なテストを実行できます。これらを使用するには、以下のように *tiny* モデルを探してください:

```bash
grep tiny tests examples
```

[スクリプトの例](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py)があり、これにより tiny-wmt19-en-de のような小さなモデルが作成されます。特定のモデルのアーキテクチャに簡単に調整できます。

実行時間を誤って測定することは簡単です。たとえば、巨大なモデルのダウンロードに関するオーバーヘッドがある場合、ローカルでテストするとダウンロードされたファイルがキャッシュされ、ダウンロード時間が計測されなくなります。したがって、CIログの実行速度レポート(`pytest --durations=0 tests` の出力)を確認してください。

このレポートは、遅いテストとしてマークされていない遅い外れ値や、高速に書き直す必要があるテストを見つけるのにも役立ちます。テストスイートがCIで遅くなり始めた場合、このレポートのトップリストには最も遅いテストが表示されます。

### Testing the stdout/stderr output

`stdout` および/または `stderr` に書き込む関数をテストするために、テストは `pytest` の [capsys システム](https://docs.pytest.org/en/latest/capture.html) を使用してこれらのストリームにアクセスできます。以下はその方法です:

```python
import sys


def print_to_stdout(s):
    print(s)


def print_to_stderr(s):
    sys.stderr.write(s)


def test_result_and_stdout(capsys):
    msg = "Hello"
    print_to_stdout(msg)
    print_to_stderr(msg)
    out, err = capsys.readouterr()  # consume the captured output streams
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    sys.stderr.write(err)
    # test:
    assert msg in out
    assert msg in err
```

そしてもちろん、ほとんどの場合、`stderr` は例外の一部として提供されるため、そのような場合には try/except を使用する必要があります:

```python
def raise_exception(msg):
    raise ValueError(msg)


def test_something_exception():
    msg = "Not a good value"
    error = ""
    try:
        raise_exception(msg)
    except Exception as e:
        error = str(e)
        assert msg in error, f"{msg} is in the exception:\n{error}"
```

stdout をキャプチャするもう 1 つのアプローチは、`contextlib.redirect_stdout`を使用することです。

```python
from io import StringIO
from contextlib import 
redirect_stdout def print_to_stdout(s): print(s) def test_result_and_stdout(): msg = "Hello" buffer = StringIO() with redirect_stdout(buffer): print_to_stdout(msg) out = buffer.getvalue() # optional: if you want to replay the consumed streams: sys.stdout.write(out) # test: assert msg in out ``` stdout をキャプチャする際の重要な潜在的な問題は、通常の `print` でこれまでに出力された内容をリセットする可能性がある `\r` 文字が含まれている可能性があることです。`pytest` 自体には問題はありませんが、`pytest -s` ではこれらの文字がバッファに含まれるため、`-s` ありとなしでテストを実行できるようにするには、`re.sub(r'~.*\r', '', buf, 0, re.M)` を使用してキャプチャされた出力に対して追加のクリーンアップを行う必要があります。 しかし、その後、`\r` が含まれているかどうかにかかわらず、すべての操作を自動的に処理するヘルパーコンテキストマネージャラッパーがあります。したがって、次のように簡単に行えます: ```python from transformers.testing_utils import CaptureStdout with CaptureStdout() as cs: function_that_writes_to_stdout() print(cs.out) ``` 完全なテスト例は次のとおりです。 ```python from transformers.testing_utils import CaptureStdout msg = "Secret message\r" final = "Hello World" with CaptureStdout() as cs: print(msg + final) assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}" ``` `stderr` をキャプチャしたい場合は、代わりに `CaptureStderr` クラスを使用してください。 ```python from transformers.testing_utils import CaptureStderr with CaptureStderr() as cs: function_that_writes_to_stderr() print(cs.err) ``` 両方のストリームを一度にキャプチャする必要がある場合は、親の `CaptureStd` クラスを使用します。 ```python from transformers.testing_utils import CaptureStd with CaptureStd() as cs: function_that_writes_to_stdout_and_stderr() print(cs.err, cs.out) ``` また、テストの問題のデバッグを支援するために、デフォルトで、これらのコンテキスト マネージャーは終了時にキャプチャされたストリームを自動的に再生します。 文脈から。 ### Capturing logger stream ロガーの出力を検証する必要がある場合は、`CaptureLogger`を使用できます。 ```python from transformers import logging from transformers.testing_utils import CaptureLogger msg = "Testing 1, 2, 3" logging.set_verbosity_info() logger = logging.get_logger("transformers.models.bart.tokenization_bart") with CaptureLogger(logger) as cl: logger.info(msg) assert cl.out, msg + "\n" ``` ### Testing with environment variables 特定のテストで環境変数の影響をテストしたい場合は、ヘルパー デコレータを使用できます。 `transformers.testing_utils.mockenv` ```python from transformers.testing_utils import mockenv class HfArgumentParserTest(unittest.TestCase): @mockenv(TRANSFORMERS_VERBOSITY="error") def test_env_override(self): env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) ``` 場合によっては、外部プログラムを呼び出す必要があるため、`os.environ` に`PYTHONPATH`を設定してインクルードする必要があります。 複数のローカル パス。ヘルパー クラス `transformers.test_utils.TestCasePlus` が役に立ちます。 ```python from transformers.testing_utils import TestCasePlus class EnvExampleTest(TestCasePlus): def test_external_prog(self): env = self.get_env() # now call the external program, passing `env` to it ``` テストファイルが `tests` テストスイートまたは `examples` のどちらにあるかに応じて `env[PYTHONPATH]` を使用して、これら 2 つのディレクトリのいずれかを含めます。また、テストが確実に行われるようにするための `src` ディレクトリも含めます。 現在のリポジトリに対して実行され、最後に、テストが実行される前にすでに設定されていた `env[PYTHONPATH]` を使用して実行されます。 何かあれば呼ばれます。 このヘルパー メソッドは `os.environ` オブジェクトのコピーを作成するため、元のオブジェクトはそのまま残ります。 ### Getting reproducible results 状況によっては、テストのランダム性を削除したい場合があります。同一の再現可能な結果セットを取得するには、 シードを修正する必要があります: ```python seed = 42 # python RNG import random random.seed(seed) # pytorch RNGs import torch torch.manual_seed(seed) torch.backends.cudnn.deterministic = True if torch.cuda.is_available(): torch.cuda.manual_seed_all(seed) # numpy RNG import numpy as np np.random.seed(seed) # tf RNG tf.random.set_seed(seed) ``` ### Debugging tests 警告が発生した時点でデバッガーを開始するには、次の手順を実行します。 ```bash pytest tests/utils/test_logging.py -W error::UserWarning --pdb ``` ## Working with github actions workflows セルフプッシュのワークフローCIジョブをトリガーするには、以下の手順を実行する必要があります: 1. 
`transformers` のリモートリポジトリで新しいブランチを作成します(フォークではなく、元のリポジトリで行います)。 2. ブランチの名前は `ci_` または `ci-` で始まる必要があります(`main` もトリガーしますが、`main` ではPRを作成できません)。また、特定のパスでのみトリガーされます - このドキュメントが書かれた後に変更された場合に備えて、最新の定義は[こちら](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml)の *push:* にあります。 3. このブランチからPRを作成します。 4. その後、このジョブが[ここ](https://github.com/huggingface/transformers/actions/workflows/self-push.yml)に表示されます。ジョブはバックログがある場合、すぐに実行されないことがあります。 ## Testing Experimental CI Features CI機能のテストは通常のCIの正常な動作に干渉する可能性があるため、新しいCI機能を追加する場合、以下の手順に従う必要があります。 1. テストが必要なものをテストするための新しい専用のジョブを作成します。 2. 新しいジョブは常に成功する必要があるため、常にグリーン ✓(詳細は以下参照)を表示する必要があります。 3. さまざまな種類のPR(ユーザーフォークブランチ、非フォークブランチ、github.com UIから直接ファイルを編集するブランチ、さまざまな強制プッシュなど)が実行されるまでいくつかの日間実行し、実験的なジョブのログを監視します(意図的に常にグリーンになるようになっている全体のジョブの緑ではなく)。 4. すべてが安定していることが明確になったら、新しい変更を既存のジョブに統合します。 このように、CI機能自体の実験が通常のワークフローに干渉しないようにできます。 では、新しいCI機能が開発中である間、ジョブを常に成功させるにはどうすればいいでしょうか? TravisCIのような一部のCIは `ignore-step-failure` をサポートし、全体のジョブを成功として報告しますが、この文書が作成された時点ではCircleCIとGithub Actionsはそれをサポートしていません。 したがって、以下のワークアラウンドを使用できます: 1. bashスクリプト内で潜在的な失敗を抑制するために実行コマンドの冒頭に `set +euo pipefail` を記述します。 2. 最後のコマンドは成功する必要があります。たとえば `echo "done"` または単に `true` を使用できます。 以下は例です: ```yaml - run: name: run CI experiment command: | set +euo pipefail echo "setting run-all-despite-any-errors-mode" this_command_will_fail echo "but bash continues to run" # emulate another failure false # but the last command must be a success echo "during experiment do not remove: reporting success to CI, even if there were failures" ``` 単純なコマンドの場合は、次のようにすることもできます。 ```bash cmd_that_may_fail || true ``` もちろん、結果に満足したら、実験的なステップやジョブを通常のジョブと統合し、`set +euo pipefail` などの追加した要素を削除して、実験的なジョブが通常のCIの動作に干渉しないようにします。 このプロセス全体は、実験的なステップに対して `allow-failure` のようなものを設定し、PRの全体のステータスに影響を与えずに失敗させることができれば、はるかに簡単になったでしょう。しかし、前述の通り、現在はCircleCIとGithub Actionsはこの機能をサポートしていません。 この機能に関しての投票や、CIに特有のスレッドでその進捗状況を確認できます: - [Github Actions:](https://github.com/actions/toolkit/issues/399) - [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
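参考までに、上記の「常に成功させる」ワークアラウンドをひとつのシェルスクリプトにまとめた最小のスケッチを以下に示します(`this_command_will_fail` などのコマンド名は説明用の架空のものです):

```bash
#!/usr/bin/env bash
# 実験的な CI ステップ全体を常に成功として報告するためのスケッチ
set +euo pipefail          # エラーで即終了する挙動を無効化

this_command_will_fail     # 失敗してもスクリプトは継続する
echo "continuing after a failure"

# 最後のコマンドは必ず成功させる必要がある
true
```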
transformers/docs/source/ja/testing.md/0
{ "file_path": "transformers/docs/source/ja/testing.md", "repo_id": "transformers", "token_count": 22730 }
301
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 큰 모델 인스턴스화 [[instantiating-a-big-model]] 매우 큰 사전훈련된 모델을 사용하려면, RAM 사용을 최소화해야 하는 과제가 있습니다. 일반적인 PyTorch 워크플로우는 다음과 같습니다: 1. 무작위 가중치로 모델을 생성합니다. 2. 사전훈련된 가중치를 불러옵니다. 3. 사전훈련된 가중치를 무작위 모델에 적용합니다. 1단계와 2단계 모두 모델의 전체 버전을 메모리에 적재해야 하며, 대부분 문제가 없지만 모델이 기가바이트급의 용량을 차지하기 시작하면 복사본 2개가 RAM을 초과하여 메모리 부족 이슈를 야기할 수 있습니다. 더 심각한 문제는 분산 학습을 위해 `torch.distributed`를 사용하는 경우, 프로세스마다 사전훈련된 모델을 로드하고 복사본을 2개씩 RAM에 저장한다는 것입니다. <Tip> 무작위로 생성된 모델은 "비어 있는" (즉 그때 메모리에 있던 것으로 이뤄진) 텐서로 초기화되며 메모리 공간을 차지합니다. 초기화된 모델/파라미터의 종류에 적합한 분포(예: 정규 분포)에 따른 무작위 초기화는 가능한 한 빠르게 하기 위해 초기화되지 않은 가중치에 대해 3단계 이후에만 수행됩니다! </Tip> 이 안내서에서는 Transformers가 이 문제를 해결하기 위해 제공하는 솔루션을 살펴봅니다. 주의할 점은 아직 활발히 개발 중인 분야이므로 여기서 설명하는 API가 앞으로 약간 변경될 수 있다는 것입니다. ## 샤딩된 체크포인트 [[sharded-checkpoints]] 4.18.0 버전 이후, 10GB 이상의 공간을 차지하는 모델 체크포인트는 자동으로 작은 조각들로 샤딩됩니다. `model.save_pretrained(save_dir)`를 실행할 때 하나의 단일 체크포인트를 가지게 될 대신, 여러 부분 체크포인트(각각의 크기는 10GB 미만)와 매개변수 이름을 해당 파일에 매핑하는 인덱스가 생성됩니다. `max_shard_size` 매개변수로 샤딩 전 최대 크기를 제어할 수 있으므로, 이 예제를 위해 샤드 크기가 작은 일반 크기의 모델을 사용하겠습니다: 전통적인 BERT 모델을 사용해 봅시다. ```py from transformers import AutoModel model = AutoModel.from_pretrained("google-bert/bert-base-cased") ``` [`~PreTrainedModel.save_pretrained`]을 사용하여 모델을 저장하면, 모델의 구성과 가중치가 들어있는 두 개의 파일이 있는 새 폴더가 생성됩니다: ```py >>> import os >>> import tempfile >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir) ... print(sorted(os.listdir(tmp_dir))) ['config.json', 'pytorch_model.bin'] ``` 이제 최대 샤드 크기를 200MB로 사용해 봅시다: ```py >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... print(sorted(os.listdir(tmp_dir))) ['config.json', 'pytorch_model-00001-of-00003.bin', 'pytorch_model-00002-of-00003.bin', 'pytorch_model-00003-of-00003.bin', 'pytorch_model.bin.index.json'] ``` 모델의 구성에 더해, 세 개의 다른 가중치 파일과 파라미터 이름과 해당 파일의 매핑이 포함된 `index.json` 파일을 볼 수 있습니다. 이러한 체크포인트는 [`~PreTrainedModel.from_pretrained`] 메서드를 사용하여 완전히 다시 로드할 수 있습니다: ```py >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... new_model = AutoModel.from_pretrained(tmp_dir) ``` 큰 모델의 경우 이러한 방식으로 처리하는 주된 장점은 위에서 보여준 흐름의 2단계에서, 각 샤드가 이전 샤드 다음에 로드되므로 메모리 사용량이 모델 크기와 가장 큰 샤드의 크기를 초과하지 않는다는 점입니다. 이 인덱스 파일은 키가 체크포인트에 있는지, 그리고 해당 가중치가 어디에 저장되어 있는지를 결정하는 데 사용됩니다. 이 인덱스를 json과 같이 로드하고 딕셔너리를 얻을 수 있습니다: ```py >>> import json >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... with open(os.path.join(tmp_dir, "pytorch_model.bin.index.json"), "r") as f: ... index = json.load(f) >>> print(index.keys()) dict_keys(['metadata', 'weight_map']) ``` 메타데이터는 현재 모델의 총 크기만 포함됩니다. 
앞으로 다른 정보를 추가할 계획입니다: ```py >>> index["metadata"] {'total_size': 433245184} ``` 가중치 맵은 이 인덱스의 주요 부분으로, 각 매개변수 이름(PyTorch 모델 `state_dict`에서 보통 찾을 수 있는)을 해당 파일에 매핑합니다: ```py >>> index["weight_map"] {'embeddings.LayerNorm.bias': 'pytorch_model-00001-of-00003.bin', 'embeddings.LayerNorm.weight': 'pytorch_model-00001-of-00003.bin', ... ``` 만약 [`~PreTrainedModel.from_pretrained`]를 사용하지 않고 모델 내에서 이러한 샤딩된 체크포인트를 직접 가져오려면 (전체 체크포인트를 위해 `model.load_state_dict()`를 수행하는 것처럼), [`~modeling_utils.load_sharded_checkpoint`]를 사용해야 합니다. ```py >>> from transformers.modeling_utils import load_sharded_checkpoint >>> with tempfile.TemporaryDirectory() as tmp_dir: ... model.save_pretrained(tmp_dir, max_shard_size="200MB") ... load_sharded_checkpoint(model, tmp_dir) ``` ## 저(低)메모리 로딩 [[low-memory-loading]] 샤딩된 체크포인트는 위에서 언급한 작업 흐름의 2단계에서 메모리 사용량을 줄이지만, 저(低)메모리 설정에서 모델을 사용하기 위해 우리의 Accelerate 라이브러리를 기반으로 한 도구를 활용하는 것이 좋습니다. 자세한 사항은 다음 가이드를 참조해주세요: [Accelerate로 대규모 모델 가져오기 (영문)](../en/main_classes/model#large-model-loading)
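참고로, Accelerate 기반 저(低)메모리 로딩의 가장 일반적인 진입점은 [`~PreTrainedModel.from_pretrained`]의 `low_cpu_mem_usage`와 `device_map` 인수입니다. 아래는 간단한 스케치이며, 세부 동작과 제한 사항은 위 가이드를 참조하세요:

```py
from transformers import AutoModel

# 빈(meta) 모델을 먼저 만든 뒤 체크포인트 샤드를 순차적으로 채워 넣어,
# 최대 RAM 사용량을 "모델 크기 + 가장 큰 샤드 크기" 수준으로 줄입니다.
model = AutoModel.from_pretrained(
    "google-bert/bert-base-cased",
    low_cpu_mem_usage=True,
    device_map="auto",  # 사용 가능한 장치에 가중치를 자동으로 배치 (Accelerate 필요)
)
```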
transformers/docs/source/ko/big_models.md/0
{ "file_path": "transformers/docs/source/ko/big_models.md", "repo_id": "transformers", "token_count": 4438 }
302
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLM 추론 최적화 [[llm-inference-optimization]] 대규모 언어 모델(LLM)은 채팅 및 코드 완성 모델과 같은 텍스트 생성 응용 프로그램을 한 단계 끌어올리며, 높은 수준의 이해력과 유창함을 보여주는 텍스트를 생성합니다. 그러나 LLM을 강력하게 만드는 요소인 그들의 크기는 동시에 추론 과정에서 도전 과제가 되기도 합니다. 기본적인 추론은 느립니다, 왜냐하면 LLM이 다음 토큰을 생성하기 위해 반복적으로 호출되어야 하기 때문입니다. 생성이 진행됨에 따라 입력 시퀀스가 길어져 처리 시간이 점점 길어집니다. 또한, LLM은 수십억 개의 매개변수를 가지고 있어 모든 가중치를 메모리에 저장하고 처리하는 데 어려움이 있습니다. 이 가이드는 LLM 추론을 가속하기 위해 Transformers에서 사용할 수 있는 최적화 기술을 사용하는 방법을 보여줍니다. > [!TIP] > Hugging Face는 LLM을 추론에 최적화하여 배포하고 서비스하는 데 전념하는 라이브러리인 [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference)을 제공합니다. 이 라이브러리는 처리량 증가를 위한 지속적인 배칭과 다중 GPU 추론을 위한 텐서 병렬화와 같은 Transformers에 포함되지 않은 배포 지향 최적화 기능을 포함합니다. ## 정적 kv-cache와 `torch.compile`[[static-kv-cache-and-torchcompile]] 디코딩 중에 LLM은 각 입력 토큰에 대한 key-value(kv) 값을 계산합니다. LLM은 자기회귀(autoregressive)이기 때문에 생성된 출력이 현재 입력의 일부가 되어 매번 동일한 kv 값을 계산합니다. 이는 매번 동일한 kv 값을 다시 계산하기 때문에 효율적이지 않습니다. 이를 최적화하기 위해, 이전 키(key)와 값(value)을 재계산하지 않고 저장하는 kv-cache를 사용할 수 있습니다. 그러나 kv-cache는 각 생성 단계에서 증가하며 동적이기 때문에 PyTorch 코드를 빠르고 최적화된 커널로 통합하는 강력한 최적화 도구인 [`torch.compile`](./perf_torch_compile)을 사용하는 데 제약이 있습니다. *정적 kv-cache*는 최댓값을 미리 할당하여 이 문제를 해결하여 `torch.compile`과 결합할 수 있게 합니다. 이를 통해 최대 4배의 속도 향상이 가능합니다. 속도 향상은 모델 크기(더 큰 모델은 속도 향상이 적음)와 하드웨어에 따라 다를 수 있습니다. > [!WARNING] 현재 [Llama](./model_doc/llama2) 및 몇 가지 다른 모델만 정적 kv-cache와 `torch.compile`을 지원합니다. 실시간 모델 호환성 목록은 [이 이슈](https://github.com/huggingface/transformers/issues/28981)를 확인하십시오. 작업의 복잡성에 따라 세 가지 방식의 정적 kv-cache 사용 방법이 있습니다: 1. 기본 사용법: `generation_config`에서 플래그를 설정하기만 하면 됩니다(권장); 2. 고급 사용법: 여러 번의 생성이나 맞춤형 생성 루프를 위해 캐시 객체를 처리합니다; 3. 고급 사용법: 단일 그래프가 필요한 경우, 전체 `generate` 함수를 하나의 그래프로 컴파일합니다. 올바른 탭을 선택하여 각 방법에 대한 추가 지침을 확인하세요. > [!TIP] > `torch.compile`을 사용할 때 어떤 전략을 사용하든, LLM 입력을 제한된 값 세트로 왼쪽에 패딩하면 모양과 관련된 재컴파일을 피할 수 있습니다. [`pad_to_multiple_of` tokenizer flag](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of)가 유용할 것입니다! <hfoptions id="static-kv"> <hfoption id="basic usage: generation_config"> 이 예제에서는 [Gemma](https://hf.co/google/gemma-2b) 모델을 사용해 보겠습니다. 필요한 작업은 다음과 같습니다: 1. 모델의 `generation_config` 속성에 접근하여 `cache_implementation`을 "static"으로 설정합니다; 2. 모델의 `forward` 패스를 정적 kv-cache와 함께 컴파일하기 위해 `torch.compile`을 호출합니다. 이렇게 하면 끝입니다! 
```py from transformers import AutoTokenizer, AutoModelForCausalLM import torch import os os.environ["TOKENIZERS_PARALLELISM"] = "false" # 긴 경고 메시지를 방지하기 위해 설정 :) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto") model.generation_config.cache_implementation = "static" model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) input_text = "The theory of special relativity states " input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['The theory of special relativity states 1. The speed of light is constant in all inertial reference'] ``` `generate` 함수는 내부적으로 동일한 캐시 객체를 재사용하려고 시도하며, 이를 통해 각 호출 시 재컴파일의 필요성을 제거합니다. 재컴파일을 피하는 것은 `torch.compile`의 성능을 최대한 활용하는 데 매우 중요하며, 다음 사항에 유의해야 합니다: 1. 배치 크기가 변경되거나 호출 간 최대 출력 길이가 증가하면 캐시를 다시 초기화해야 하며, 이로 인해 새로 컴파일을 해야 합니다; 2. 컴파일된 함수의 첫 몇 번의 호출은 함수가 컴파일되는 동안 더 느립니다. > [!WARNING] > 다중 턴 대화와 같은 정적 캐시의 고급 사용을 위해서는, 캐시 객체를 [`~GenerationMixin.generate`] 외부에서 인스턴스화하고 조작하는 것을 권장합니다. 고급 사용법 탭을 참조하세요. </hfoption> <hfoption id="advanced usage: control Static Cache"> [`StaticCache`] 객체는 `past_key_values` 인수로 모델의 [`~GenerationMixin.generate`] 함수에 전달할 수 있습니다. 이 객체는 캐시 내용을 유지하므로, 동적 캐시를 사용하는 것처럼 새로운 [`~GenerationMixin.generate`] 호출에 이를 전달하여 생성을 계속할 수 있습니다. ```py from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache import torch import os os.environ["TOKENIZERS_PARALLELISM"] = "false" # 긴 경고 메시지를 방지하기 위해 설정 :) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto") model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) input_text = "The theory of special relativity states " input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") prompt_length = input_ids.input_ids.shape[1] model.generation_config.max_new_tokens = 16 past_key_values = StaticCache( config=model.config, batch_size=1, # 캐시를 재사용할 계획이 있는 경우, 모든 경우에 충분한 캐시 길이를 설정해야 합니다 max_cache_len=prompt_length+(model.generation_config.max_new_tokens*2), device=model.device, dtype=model.dtype ) outputs = model.generate(**input_ids, past_key_values=past_key_values) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2'] # 생성된 텍스트와 동일한 캐시 객체를 전달하여, 중단한 곳에서 생성을 계속합니다. # 다중 턴 대화의 경우, 생성된 텍스트에 새로운 사용자 입력을 추가할 수 있습니다. new_input_ids = outputs outputs = model.generate(new_input_ids, past_key_values=past_key_values) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2. The speed of light is constant in all inertial reference frames. 3.'] ``` > [!TIP] > 동일한 [`StaticCache`] 객체를 새로운 프롬프트에 사용하려면, 호출 간에 `.reset()` 메서드를 사용하여 그 내용을 초기화하는 것이 좋습니다. 더 깊이 들어가고 싶다면, [`StaticCache`] 객체를 모델의 `forward` 패스에 동일한 `past_key_values` 인수로 전달할 수도 있습니다. 이 전략을 사용하면, 현재 토큰과 이전에 생성된 토큰의 위치 및 캐시 위치를 바탕으로 다음 토큰을 디코딩하는 자체 함수를 작성할 수 있습니다. 
```py from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging from transformers.testing_utils import CaptureLogger import torch prompts = [ "Simply put, the theory of relativity states that ", "My favorite all time favorite condiment is ketchup.", ] NUM_TOKENS_TO_GENERATE = 40 torch_device = "cuda" tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right") model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="sequential") inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values): logits = model( cur_token, position_ids=input_pos, cache_position=cache_position, past_key_values=past_key_values, return_dict=False, use_cache=True )[0] new_token = torch.argmax(logits[:, -1], dim=-1)[:, None] return new_token ``` `StaticCache` 메서드를 사용하여 정적 kv-cache와 `torch.compile`을 활성화하려면 몇 가지 중요한 작업을 수행해야 합니다: 1. 추론에 모델을 사용하기 전에 [`StaticCache`] 인스턴스를 초기화합니다. 여기서 최대 배치 크기와 시퀀스 길이와 같은 매개변수를 설정할 수 있습니다. 2. 정적 kv-cache와 함께 순전파를 컴파일하기 위해 모델에 `torch.compile`을 호출합니다. 3. [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) 컨텍스트 관리자에서 `enable_math=True`를 설정하여 네이티브 PyTorch C++ 구현된 스케일된 점곱 어텐션(scaled dot product attention)을 활성화하여 추론 속도를 더욱 높입니다. ```py batch_size, seq_length = inputs["input_ids"].shape with torch.no_grad(): past_key_values = StaticCache( config=model.config, max_batch_size=2, max_cache_len=4096, device=torch_device, dtype=model.dtype ) cache_position = torch.arange(seq_length, device=torch_device) generated_ids = torch.zeros( batch_size, seq_length + NUM_TOKENS_TO_GENERATE + 1, dtype=torch.int, device=torch_device ) generated_ids[:, cache_position] = inputs["input_ids"].to(torch_device).to(torch.int) logits = model( **inputs, cache_position=cache_position, past_key_values=past_key_values,return_dict=False, use_cache=True )[0] next_token = torch.argmax(logits[:, -1], dim=-1)[:, None] generated_ids[:, seq_length] = next_token[:, 0] decode_one_tokens = torch.compile(decode_one_tokens, mode="reduce-overhead", fullgraph=True) cache_position = torch.tensor([seq_length + 1], device=torch_device) for _ in range(1, NUM_TOKENS_TO_GENERATE): with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True): next_token = decode_one_tokens(model, next_token.clone(), None, cache_position, past_key_values) generated_ids[:, cache_position] = next_token.int() cache_position += 1 text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) text ['Simply put, the theory of relativity states that 1) the speed of light is constant, 2) the speed of light is the same for all observers, and 3) the laws of physics are the same for all observers.', 'My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p'] ``` </hfoption> <hfoption id="advanced usage: end-to-end generate compilation"> 전체 `generate` 함수를 컴파일하는 것은 코드 측면에서 기본 사용법보다 더 간단합니다. `generate` 함수에 대해 `torch.compile`을 호출하여 전체 함수를 컴파일하면 됩니다. 정적 캐시의 사용을 지정할 필요는 없습니다. 정적 캐시는 호환되지만, 벤치마크에서는 동적 캐시(기본 설정)가 더 빠른 것으로 나타났습니다. 
```py from transformers import AutoTokenizer, AutoModelForCausalLM import torch import os os.environ["TOKENIZERS_PARALLELISM"] = "false" # 긴 경고 메시지를 방지하기 위해 설정 :) tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b") model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto") model.generate = torch.compile(model.generate, mode="reduce-overhead", fullgraph=True) input_text = "The theory of special relativity states " input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['The theory of special relativity states 1. The speed of light is constant in all inertial reference'] ``` 이 방법을 통해 모델의 forward 패스뿐만 아니라, 입력 준비, logit 처리기 작업 등을 포함한 모든 것을 컴파일합니다. 기본 사용 예제에 비해 `generate` 호출이 약간 더 빠를 수 있으며, 컴파일된 그래프는 더 특이한 하드웨어 장치나 사용 사례에 적합할 수 있습니다. 그러나 이 접근 방식을 사용하는 데는 몇 가지 큰 단점이 있습니다: 1. 컴파일 속도가 훨씬 느립니다; 2. `generate`의 모든 매개변수 설정은 `generation_config`를 통해서만 가능합니다; 3. 많은 경고와 예외가 억제됩니다. -- 먼저 컴파일 되지 않은 형태로 테스트하는 것을 권장합니다; 4. 현재 작업 중이지만 기능 제한이 심합니다(예: 작성 시점에서는 EOS 토큰이 선택되어도 생성이 중단되지 않습니다). </hfoption> </hfoptions> ## 추정 디코딩 [[speculative-decoding]] > [!TIP] > 보다 심층적인 설명을 원한다면, [Assisted Generation: a new direction toward low-latency text generation](https://hf.co/blog/assisted-generation) 블로그 게시물을 확인하십시오! 자기 회귀의 또 다른 문제는 각 입력 토큰에 대해 순전파 중에 모델 가중치를 매번 로드해야 한다는 점입니다. 이는 수십억 개의 매개변수를 가진 LLM에는 느리고 번거롭습니다. 추정 디코딩(speculative decoding)은 더 작고 빠른 보조 모델을 사용하여 후보 토큰을 생성하고, 이를 큰 LLM이 단일 순전파에서 검증하여 이 속도 저하를 완화합니다. 검증된 토큰이 정확하다면, LLM은 본래 자체적으로 생성하는 것처럼 토큰을 얻을 수 있습니다. 전방 패스가 동일한 출력을 보장하기 때문에 정확도 저하가 없습니다. 가장 큰 속도 향상을 얻기 위해, 보조 모델은 빠르게 토큰을 생성할 수 있도록 LLM보다 훨씬 작아야 합니다. 보조 모델과 LLM 모델은 토큰을 다시 인코딩하고 디코딩하지 않도록 동일한 토크나이저를 공유해야 합니다. > [!WARNING] > 추정 디코딩은 탐욕 검색과 샘플링 디코딩 전략에서만 지원되며, 배치 입력을 지원하지 않습니다. 보조 모델을 로드하고 이를 [`~GenerationMixin.generate`] 메서드에 전달하여 추정 디코딩을 활성화하십시오. <hfoptions id="spec-decoding"> <hfoption id="greedy search"> ```py from transformers import AutoModelForCausalLM, AutoTokenizer import torch device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b") inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device) assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device) outputs = model.generate(**inputs, assistant_model=assistant_model) tokenizer.batch_decode(outputs, skip_special_tokens=True) ["Einstein's theory of relativity states that the speed of light is constant. "] ``` </hfoption> <hfoption id="sampling"> 추정 샘플링 디코딩(speculative sampling decoding)을 위해, 보조 모델 외에도 [`~GenerationMixin.generate`] 메서드에 `do_sample` 및 `temperature` 매개변수를 추가하십시오. 
```py from transformers import AutoModelForCausalLM, AutoTokenizer import torch device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b") inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device) assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device) outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.7) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ["Einstein's theory of relativity states that motion in the universe is not a straight line.\n"] ``` </hfoption> </hfoptions> ### 프롬프트 조회 디코딩 [[prompt-lookup-decoding]] 프롬프트 조회 디코딩은 탐욕 검색과 샘플링과도 호환되는 추정 디코딩의 변형입니다. 프롬프트 조회는 요약과 같은 입력 기반 작업에 특히 잘 작동합니다. 여기서는 프롬프트와 출력 간에 종종 겹치는 단어가 있습니다. 이러한 겹치는 n-그램이 LLM 후보 토큰으로 사용됩니다. 프롬프트 조회 디코딩을 활성화하려면 `prompt_lookup_num_tokens` 매개변수에 겹치는 토큰 수를 지정하십시오. 그런 다음 이 매개변수를 [`~GenerationMixin.generate`] 메서드에 전달할 수 있습니다. <hfoptions id="pld"> <hfoption id="greedy decoding"> ```py from transformers import AutoModelForCausalLM, AutoTokenizer import torch device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b") inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device) assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device) outputs = model.generate(**inputs, prompt_lookup_num_tokens=3) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ['The second law of thermodynamics states that entropy increases with temperature. '] ``` </hfoption> <hfoption id="sampling"> 샘플링과 함께 프롬프트 조회 디코딩을 사용하려면, [`~GenerationMixin.generate`] 메서드에 `do_sample` 및 `temperature` 매개변수를 추가하십시오. ```py from transformers import AutoModelForCausalLM, AutoTokenizer import torch device = "cuda" if torch.cuda.is_available() else "cpu" tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b") inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device) model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device) outputs = model.generate(**inputs, prompt_lookup_num_tokens=3, do_sample=True, temperature=0.7) print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) ["The second law of thermodynamics states that energy cannot be created nor destroyed. It's not a"] ``` </hfoption> </hfoptions> ## 어텐션 최적화 [[attention-optimizations]] 트랜스포머 모델의 알려진 문제는 셀프 어텐션 메커니즘이 입력 토큰 수와 함께 계산 및 메모리가 제곱으로 증가한다는 것입니다. 이 제한은 훨씬 더 긴 시퀀스를 처리하는 LLM에서는 더욱 커집니다. 이를 해결하기 위해 FlashAttention2 또는 PyTorch의 스케일된 점곱 어텐션을 사용해 보십시오. 이들은 더 메모리 효율적인 어텐션 구현으로 추론을 가속화할 수 있습니다. ### FlashAttention-2 [[flashattention-2]] FlashAttention과 [FlashAttention-2](./perf_infer_gpu_one#flashattention-2)는 어텐션 계산을 더 작은 청크로 나누고 중간 읽기/쓰기 작업을 줄여 추론 속도를 높입니다. FlashAttention-2는 원래 FlashAttention 알고리즘을 개선하여 시퀀스 길이 차원에서도 병렬 처리를 수행하고 하드웨어에서 작업을 더 잘 분할하여 동기화 및 통신 오버헤드를 줄입니다. FlashAttention-2를 사용하려면 [`~PreTrainedModel.from_pretrained`] 메서드에서 `attn_implementation="flash_attention_2"`를 설정하십시오. 
```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
import torch

quant_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b",
    quantization_config=quant_config,
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
```

### PyTorch 스케일된 점곱 어텐션(scaled dot product attention) [[pytorch-scaled-dot-product-attention]]

스케일된 점곱 어텐션(SDPA)는 PyTorch 2.0에서 자동으로 활성화되며, FlashAttention, xFormers, PyTorch의 C++ 구현을 지원합니다. SDPA는 CUDA 백엔드를 사용하는 경우 가장 성능이 좋은 어텐션 알고리즘을 선택합니다. 다른 백엔드에서는 SDPA가 PyTorch C++ 구현으로 기본 설정됩니다.

> [!TIP]
> SDPA는 최신 PyTorch 버전이 설치되어 있으면 FlashAttention-2도 지원합니다.

세 가지 어텐션 알고리즘 중 하나를 명시적으로 활성화하거나 비활성화하려면 [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) 컨텍스트 관리자를 사용하십시오. 예를 들어 FlashAttention을 활성화하려면 `enable_flash=True`로 설정하십시오.

```py
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b",
    torch_dtype=torch.bfloat16,
)

# `inputs`는 토크나이저로 준비한 모델 입력이라고 가정합니다
with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
    outputs = model.generate(**inputs)
```

## 양자화 [[quantization]]

양자화는 LLM 가중치를 더 낮은 정밀도로 저장하여 크기를 줄입니다. 이는 메모리 사용량을 줄이며 GPU 메모리에 제약이 있는 경우 추론을 위해 LLM을 로드하는 것을 더 용이하게 합니다. GPU가 충분하다면, 모델을 양자화할 필요는 없습니다. 추가적인 양자화 및 양자화 해제 단계로 인해 약간의 지연이 발생할 수 있기 때문입니다(AWQ 및 융합 AWQ 모듈 제외).

> [!TIP]
> 다양한 양자화 라이브러리(자세한 내용은 [Quantization](./quantization) 가이드를 참조하십시오)가 있습니다. 여기에는 Quanto, AQLM, AWQ 및 AutoGPTQ가 포함됩니다. 사용 사례에 가장 잘 맞는 라이브러리를 사용해 보십시오. 또한 AutoGPTQ와 bitsandbytes를 비교하는 [Overview of natively supported quantization schemes in 🤗 Transformers](https://hf.co/blog/overview-quantization-transformers) 블로그 게시물을 읽어보는 것을 추천합니다.

아래의 모델 메모리 계산기를 사용하여 모델을 로드하는 데 필요한 메모리를 추정하고 비교해 보십시오. 예를 들어 [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)를 로드하는 데 필요한 메모리를 추정해 보십시오.

<iframe
	src="https://hf-accelerate-model-memory-usage.hf.space"
	frameborder="0"
	width="850"
	height="450"
></iframe>

Mistral-7B-v0.1을 반정밀도로 로드하려면 [`~transformers.AutoModelForCausalLM.from_pretrained`] 메서드에서 `torch_dtype` 매개변수를 `torch.bfloat16`으로 설정하십시오. 이 경우 13.74GB의 메모리가 필요합니다.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", torch_dtype=torch.bfloat16, device_map="auto",
)
```

추론을 위해 양자화된 모델(8비트 또는 4비트)을 로드하려면 [bitsandbytes](https://hf.co/docs/bitsandbytes)를 사용하고 `load_in_4bit` 또는 `load_in_8bit` 매개변수를 `True`로 설정하십시오. 모델을 8비트로 로드하는 데는 6.87GB의 메모리만 필요합니다.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
import torch

quant_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1", quantization_config=quant_config, device_map="auto"
)
```
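4비트로 로드하고 싶다면, 예를 들어 다음과 같이 설정할 수 있습니다. 아래는 NF4 양자화와 bfloat16 연산 dtype을 조합한 간단한 스케치이며, 사용 가능한 전체 옵션은 [Quantization](./quantization) 가이드를 참조하세요:

```py
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
import torch

quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",             # NF4 4비트 양자화 사용
    bnb_4bit_compute_dtype=torch.bfloat16, # 실제 연산은 bfloat16으로 수행
)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    quantization_config=quant_config,
    device_map="auto",
)
```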
transformers/docs/source/ko/llm_optims.md/0
{ "file_path": "transformers/docs/source/ko/llm_optims.md", "repo_id": "transformers", "token_count": 15193 }
303
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CPU에서 효율적인 훈련 [[efficient-training-on-cpu]] 이 가이드는 CPU에서 대규모 모델을 효율적으로 훈련하는 데 초점을 맞춥니다. ## IPEX와 혼합 정밀도 [[mixed-precision-with-ipex]] IPEX는 AVX-512 이상을 지원하는 CPU에 최적화되어 있으며, AVX2만 지원하는 CPU에도 기능적으로 작동합니다. 따라서 AVX-512 이상의 Intel CPU 세대에서는 성능상 이점이 있을 것으로 예상되지만, AVX2만 지원하는 CPU (예: AMD CPU 또는 오래된 Intel CPU)의 경우에는 IPEX 아래에서 더 나은 성능을 보일 수 있지만 이는 보장되지 않습니다. IPEX는 Float32와 BFloat16를 모두 사용하여 CPU 훈련을 위한 성능 최적화를 제공합니다. BFloat16의 사용은 다음 섹션의 주요 초점입니다. 저정밀도 데이터 타입인 BFloat16은 3세대 Xeon® Scalable 프로세서 (코드명: Cooper Lake)에서 AVX512 명령어 집합을 네이티브로 지원해 왔으며, 다음 세대의 Intel® Xeon® Scalable 프로세서에서 Intel® Advanced Matrix Extensions (Intel® AMX) 명령어 집합을 지원하여 성능을 크게 향상시킬 예정입니다. CPU 백엔드의 자동 혼합 정밀도 기능은 PyTorch-1.10부터 활성화되었습니다. 동시에, Intel® Extension for PyTorch에서 BFloat16에 대한 CPU의 자동 혼합 정밀도 및 연산자의 BFloat16 최적화를 대규모로 활성화하고, PyTorch 마스터 브랜치로 부분적으로 업스트림을 반영했습니다. 사용자들은 IPEX 자동 혼합 정밀도를 사용하여 더 나은 성능과 사용자 경험을 얻을 수 있습니다. [자동 혼합 정밀도](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/amp.html)에 대한 자세한 정보를 확인하십시오. ### IPEX 설치: [[ipex-installation]] IPEX 릴리스는 PyTorch를 따라갑니다. pip를 통해 설치하려면: | PyTorch Version | IPEX version | | :---------------: | :----------: | | 1.13 | 1.13.0+cpu | | 1.12 | 1.12.300+cpu | | 1.11 | 1.11.200+cpu | | 1.10 | 1.10.100+cpu | ```bash pip install intel_extension_for_pytorch==<version_name> -f https://developer.intel.com/ipex-whl-stable-cpu ``` [IPEX 설치](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/installation.html)에 대한 더 많은 접근 방법을 확인하십시오. ### Trainer에서의 사용법 [[usage-in-trainer]] Trainer에서 IPEX의 자동 혼합 정밀도를 활성화하려면 사용자는 훈련 명령 인수에 `use_ipex`, `bf16`, `no_cuda`를 추가해야 합니다. [Transformers 질문-응답](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)의 사용 사례를 살펴보겠습니다. - CPU에서 BF16 자동 혼합 정밀도를 사용하여 IPEX로 훈련하기: <pre> python run_qa.py \ --model_name_or_path google-bert/bert-base-uncased \ --dataset_name squad \ --do_train \ --do_eval \ --per_device_train_batch_size 12 \ --learning_rate 3e-5 \ --num_train_epochs 2 \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/debug_squad/ \ <b>--use_ipex \</b> <b>--bf16 --no_cuda</b></pre> ### 실습 예시 [[practice-example]] 블로그: [Intel Sapphire Rapids로 PyTorch Transformers 가속화](https://huggingface.co/blog/intel-sapphire-rapids)
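[`Trainer`] 없이 직접 PyTorch 훈련 루프를 작성하는 경우에도 같은 최적화를 적용할 수 있습니다. 아래는 `ipex.optimize`와 CPU 자동 혼합 정밀도를 함께 사용하는 최소한의 스케치입니다. `model`, `optimizer`, `dataloader`는 이미 정의되어 있다고 가정하며, 정확한 API는 설치된 IPEX 버전에 따라 다를 수 있습니다.

```python
import torch
import intel_extension_for_pytorch as ipex

# model, optimizer, dataloader는 사용자가 이미 정의했다고 가정합니다
model.train()
model, optimizer = ipex.optimize(model, optimizer=optimizer, dtype=torch.bfloat16)

for batch in dataloader:
    optimizer.zero_grad()
    # CPU 백엔드에서 BFloat16 자동 혼합 정밀도를 활성화합니다
    with torch.cpu.amp.autocast(dtype=torch.bfloat16):
        loss = model(**batch).loss
    loss.backward()
    optimizer.step()
```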
transformers/docs/source/ko/perf_train_cpu.md/0
{ "file_path": "transformers/docs/source/ko/perf_train_cpu.md", "repo_id": "transformers", "token_count": 2394 }
304
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 둘러보기 [[quick-tour]] [[open-in-colab]] 🤗 Transformers를 시작해보세요! 개발해본 적이 없더라도 쉽게 읽을 수 있도록 쓰인 이 글은 [`pipeline`](./main_classes/pipelines)을 사용하여 추론하고, 사전학습된 모델과 전처리기를 [AutoClass](./model_doc/auto)로 로드하고, PyTorch 또는 TensorFlow로 모델을 빠르게 학습시키는 방법을 소개해 드릴 것입니다. 본 가이드에서 소개되는 개념을 (특히 초보자의 관점으로) 더 친절하게 접하고 싶다면, 튜토리얼이나 [코스](https://huggingface.co/course/chapter1/1)를 참조하기를 권장합니다. 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash !pip install transformers datasets evaluate accelerate ``` 또한 선호하는 머신 러닝 프레임워크를 설치해야 합니다: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## 파이프라인 [[pipeline]] <Youtube id="tiZFewofSLM"/> [`pipeline`](./main_classes/pipelines)은 사전 훈련된 모델로 추론하기에 가장 쉽고 빠른 방법입니다. [`pipeline`]은 여러 모달리티에서 다양한 과업을 쉽게 처리할 수 있으며, 아래 표에 표시된 몇 가지 과업을 기본적으로 지원합니다: <Tip> 사용 가능한 작업의 전체 목록은 [Pipelines API 참조](./main_classes/pipelines)를 확인하세요. </Tip> | **태스크** | **설명** | **모달리티** | **파이프라인 ID** | |-----------------|----------------------------------------------------------------------|------------------|-----------------------------------------------| | 텍스트 분류 | 텍스트에 알맞은 레이블 붙이기 | 자연어 처리(NLP) | pipeline(task="sentiment-analysis") | | 텍스트 생성 | 주어진 문자열 입력과 이어지는 텍스트 생성하기 | 자연어 처리(NLP) | pipeline(task="text-generation") | | 개체명 인식 | 문자열의 각 토큰마다 알맞은 레이블 붙이기 (인물, 조직, 장소 등등) | 자연어 처리(NLP) | pipeline(task="ner") | | 질의응답 | 주어진 문맥과 질문에 따라 올바른 대답하기 | 자연어 처리(NLP) | pipeline(task="question-answering") | | 빈칸 채우기 | 문자열의 빈칸에 알맞은 토큰 맞추기 | 자연어 처리(NLP) | pipeline(task="fill-mask") | | 요약 | 텍스트나 문서를 요약하기 | 자연어 처리(NLP) | pipeline(task="summarization") | | 번역 | 텍스트를 한 언어에서 다른 언어로 번역하기 | 자연어 처리(NLP) | pipeline(task="translation") | | 이미지 분류 | 이미지에 알맞은 레이블 붙이기 | 컴퓨터 비전(CV) | pipeline(task="image-classification") | | 이미지 분할 | 이미지의 픽셀마다 레이블 붙이기(시맨틱, 파놉틱 및 인스턴스 분할 포함) | 컴퓨터 비전(CV) | pipeline(task="image-segmentation") | | 객체 탐지 | 이미지 속 객체의 경계 상자를 그리고 클래스를 예측하기 | 컴퓨터 비전(CV) | pipeline(task="object-detection") | | 오디오 분류 | 오디오 파일에 알맞은 레이블 붙이기 | 오디오 | pipeline(task="audio-classification") | | 자동 음성 인식 | 오디오 파일 속 음성을 텍스트로 바꾸기 | 오디오 | pipeline(task="automatic-speech-recognition") | | 시각 질의응답 | 주어진 이미지와 질문에 대해 올바르게 대답하기 | 멀티모달 | pipeline(task="vqa") | | 문서 질의응답 | 주어진 문서와 질문에 대해 올바르게 대답하기 | 멀티모달 | pipeline(task="document-question-answering") | | 이미지 캡션 달기 | 주어진 이미지의 캡션 생성하기 | 멀티모달 | pipeline(task="image-to-text") | 먼저 [`pipeline`]의 인스턴스를 생성하고 사용할 작업을 지정합니다. 이 가이드에서는 감정 분석을 위해 [`pipeline`]을 사용하는 예제를 보여드리겠습니다: ```py >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` [`pipeline`]은 감정 분석을 위한 [사전 훈련된 모델](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english)과 토크나이저를 자동으로 다운로드하고 캐시합니다. 
이제 `classifier`를 대상 텍스트에 사용할 수 있습니다:

```py
>>> classifier("We are very happy to show you the 🤗 Transformers library.")
[{'label': 'POSITIVE', 'score': 0.9998}]
```

만약 입력이 여러 개 있는 경우, 입력을 리스트로 [`pipeline`]에 전달하여, 사전 훈련된 모델의 출력을 딕셔너리로 이루어진 리스트 형태로 받을 수 있습니다:

```py
>>> results = classifier(["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."])
>>> for result in results:
...     print(f"label: {result['label']}, with score: {round(result['score'], 4)}")
label: POSITIVE, with score: 0.9998
label: NEGATIVE, with score: 0.5309
```

[`pipeline`]은 주어진 과업에 관계없이 데이터셋 전부를 순회할 수도 있습니다. 이 예제에서는 자동 음성 인식을 과업으로 선택해 보겠습니다:

```py
>>> import torch
>>> from transformers import pipeline

>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
```

데이터셋을 로드할 차례입니다. (자세한 내용은 🤗 Datasets [시작하기](https://huggingface.co/docs/datasets/quickstart#audio)를 참조하세요) 여기에서는 [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) 데이터셋을 로드하겠습니다:

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")  # doctest: +IGNORE_RESULT
```

데이터셋의 샘플링 레이트가 기존 모델인 [`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h)의 훈련 당시 샘플링 레이트와 일치하는지 확인해야 합니다:

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate))
```

`"audio"` 열을 호출하면 자동으로 오디오 파일을 가져와서 리샘플링합니다. 첫 4개 샘플에서 원시 웨이브폼 배열을 추출하고 파이프라인에 리스트로 전달하세요:

```py
>>> result = speech_recognizer(dataset[:4]["audio"])
>>> print([d["text"] for d in result])
['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT']
```

음성이나 비전과 같이 입력이 큰 대규모 데이터셋의 경우, 모든 입력을 메모리에 로드하지 않도록 리스트 대신 제너레이터 형태로 전달해야 합니다. 자세한 내용은 [Pipelines API 참조](./main_classes/pipelines)를 확인하세요.

### 파이프라인에서 다른 모델과 토크나이저 사용하기 [[use-another-model-and-tokenizer-in-the-pipeline]]

[`pipeline`]은 [Hub](https://huggingface.co/models)의 모든 모델을 사용할 수 있기 때문에, [`pipeline`]을 다른 용도에 맞게 쉽게 수정할 수 있습니다. 예를 들어, 프랑스어 텍스트를 처리할 수 있는 모델을 사용하기 위해선 Hub의 태그를 사용하여 적절한 모델을 필터링하면 됩니다.
필터링된 결과의 상위 항목으로는 프랑스어 텍스트에 사용할 수 있는 다국어 [BERT 모델](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment)이 반환됩니다: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> [`AutoModelForSequenceClassification`]과 [`AutoTokenizer`]를 사용하여 사전 훈련된 모델과 관련된 토크나이저를 로드하세요 (다음 섹션에서 [`AutoClass`]에 대해 더 자세히 알아보겠습니다): ```py >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> [`TFAutoModelForSequenceClassification`]과 [`AutoTokenizer`]를 사용하여 사전 훈련된 모델과 관련된 토크나이저를 로드하세요 (다음 섹션에서 [`TFAutoClass`]에 대해 더 자세히 알아보겠습니다): ```py >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> [`pipeline`]에서 모델과 토크나이저를 지정하면, 이제 `classifier`를 프랑스어 텍스트에 적용할 수 있습니다: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` 마땅한 모델을 찾을 수 없는 경우 데이터를 기반으로 사전 훈련된 모델을 미세조정해야 합니다. 미세조정 방법에 대한 자세한 내용은 [미세조정 튜토리얼](./training)을 참조하세요. 사전 훈련된 모델을 미세조정한 후에는 모델을 Hub의 커뮤니티와 공유하여 머신러닝 민주화에 기여해주세요! 🤗 ## AutoClass [[autoclass]] <Youtube id="AhChOFRegn4"/> [`AutoModelForSequenceClassification`]과 [`AutoTokenizer`] 클래스는 위에서 다룬 [`pipeline`]의 기능을 구현하는 데 사용됩니다. [AutoClass](./model_doc/auto)는 사전 훈련된 모델의 아키텍처를 이름이나 경로에서 자동으로 가져오는 '바로가기'입니다. 과업에 적합한 `AutoClass`를 선택하고 해당 전처리 클래스를 선택하기만 하면 됩니다. 이전 섹션의 예제로 돌아가서 [`pipeline`]의 결과를 `AutoClass`를 활용해 복제하는 방법을 살펴보겠습니다. ### AutoTokenizer [[autotokenizer]] 토크나이저는 텍스트를 모델의 입력으로 사용하기 위해 숫자 배열 형태로 전처리하는 역할을 담당합니다. 토큰화 과정에는 단어를 어디에서 끊을지, 어느 수준까지 나눌지와 같은 여러 규칙들이 있습니다 (토큰화에 대한 자세한 내용은 [토크나이저 요약](./tokenizer_summary)을 참조하세요). 가장 중요한 점은 모델이 사전 훈련된 모델과 동일한 토큰화 규칙을 사용하도록 동일한 모델 이름으로 토크나이저를 인스턴스화해야 한다는 것입니다. [`AutoTokenizer`]로 토크나이저를 로드하세요: ```py >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` 텍스트를 토크나이저에 전달하세요: ```py >>> encoding = tokenizer("We are very happy to show you the 🤗 Transformers library.") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` 토크나이저는 다음을 포함한 딕셔너리를 반환합니다: * [input_ids](./glossary#input-ids): 토큰의 숫자 표현. * [attention_mask](.glossary#attention-mask): 어떤 토큰에 주의를 기울여야 하는지를 나타냅니다. 토크나이저는 입력을 리스트 형태로도 받을 수 있으며, 텍스트를 패딩하고 잘라내어 일정한 길이의 묶음을 반환할 수도 있습니다: <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... 
) ``` </tf> </frameworkcontent> <Tip> [전처리](./preprocessing) 튜토리얼을 참조하시면 토큰화에 대한 자세한 설명과 함께 이미지, 오디오와 멀티모달 입력을 전처리하기 위한 [`AutoImageProcessor`]와 [`AutoFeatureExtractor`], [`AutoProcessor`]의 사용방법도 알 수 있습니다. </Tip> ### AutoModel [[automodel]] <frameworkcontent> <pt> 🤗 Transformers는 사전 훈련된 인스턴스를 간단하고 통합된 방법으로 로드할 수 있습니다. 즉, [`AutoTokenizer`]처럼 [`AutoModel`]을 로드할 수 있습니다. 유일한 차이점은 과업에 알맞은 [`AutoModel`]을 선택해야 한다는 점입니다. 텍스트 (또는 시퀀스) 분류의 경우 [`AutoModelForSequenceClassification`]을 로드해야 합니다: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> [`AutoModel`] 클래스에서 지원하는 과업에 대해서는 [과업 요약](./task_summary)을 참조하세요. </Tip> 이제 전처리된 입력 묶음을 직접 모델에 전달해야 합니다. 아래처럼 `**`를 앞에 붙여 딕셔너리를 풀어주면 됩니다: ```py >>> pt_outputs = pt_model(**pt_batch) ``` 모델의 최종 활성화 함수 출력은 `logits` 속성에 담겨있습니다. `logits`에 softmax 함수를 적용하여 확률을 얻을 수 있습니다: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformers는 사전 훈련된 인스턴스를 간단하고 통합된 방법으로 로드할 수 있습니다. 즉, [`AutoTokenizer`]처럼 [`TFAutoModel`]을 로드할 수 있습니다. 유일한 차이점은 과업에 알맞은 [`TFAutoModel`]을 선택해야 한다는 점입니다. 텍스트 (또는 시퀀스) 분류의 경우 [`TFAutoModelForSequenceClassification`]을 로드해야 합니다: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> [`AutoModel`] 클래스에서 지원하는 과업에 대해서는 [과업 요약](./task_summary)을 참조하세요. </Tip> 이제 전처리된 입력 묶음을 직접 모델에 전달해야 합니다. 아래처럼 그대로 텐서를 전달하면 됩니다: ```py >>> tf_outputs = tf_model(tf_batch) ``` 모델의 최종 활성화 함수 출력은 `logits` 속성에 담겨있습니다. `logits`에 softmax 함수를 적용하여 확률을 얻을 수 있습니다: ```py >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> 모든 🤗 Transformers 모델(PyTorch 또는 TensorFlow)은 (softmax와 같은) 최종 활성화 함수 *이전에* 텐서를 출력합니다. 왜냐하면 최종 활성화 함수의 출력은 종종 손실 함수 출력과 결합되기 때문입니다. 모델 출력은 특수한 데이터 클래스이므로 IDE에서 자동 완성됩니다. 모델 출력은 튜플이나 딕셔너리처럼 동작하며 (정수, 슬라이스 또는 문자열로 인덱싱 가능), None인 속성은 무시됩니다. </Tip> ### 모델 저장하기 [[save-a-model]] <frameworkcontent> <pt> 미세조정된 모델을 토크나이저와 함께 저장하려면 [`PreTrainedModel.save_pretrained`]를 사용하세요: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` 모델을 다시 사용하려면 [`PreTrainedModel.from_pretrained`]로 모델을 다시 로드하세요: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> 미세조정된 모델을 토크나이저와 함께 저장하려면 [`TFPreTrainedModel.save_pretrained`]를 사용하세요: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` 모델을 다시 사용하려면 [`TFPreTrainedModel.from_pretrained`]로 모델을 다시 로드하세요: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> 🤗 Transformers의 멋진 기능 중 하나는 모델을 PyTorch 또는 TensorFlow 모델로 저장해뒀다가 다른 프레임워크로 다시 로드할 수 있는 점입니다. 
`from_pt` 또는 `from_tf` 매개변수를 사용하여 모델을 한 프레임워크에서 다른 프레임워크로 변환할 수 있습니다: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </tf> </frameworkcontent> ## 커스텀 모델 구축하기 [[custom-model-builds]] 모델의 구성 클래스를 수정하여 모델의 구조를 바꿀 수 있습니다. (은닉층이나 어텐션 헤드의 수와 같은) 모델의 속성은 구성에서 지정되기 때문입니다. 커스텀 구성 클래스로 모델을 만들면 처음부터 시작해야 합니다. 모델 속성은 무작위로 초기화되므로 의미 있는 결과를 얻으려면 먼저 모델을 훈련시켜야 합니다. 먼저 [`AutoConfig`]를 가져오고 수정하고 싶은 사전학습된 모델을 로드하세요. [`AutoConfig.from_pretrained`] 내부에서 (어텐션 헤드 수와 같이) 변경하려는 속성를 지정할 수 있습니다: ```py >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> [`AutoModel.from_config`]를 사용하여 바꾼 구성대로 모델을 생성하세요: ```py >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> [`TFAutoModel.from_config`]를 사용하여 바꾼 구성대로 모델을 생성하세요: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> 커스텀 구성에 대한 자세한 내용은 [커스텀 아키텍처 만들기](./create_a_model) 가이드를 확인하세요. ## Trainer - PyTorch에 최적화된 훈련 루프 [[trainer-a-pytorch-optimized-training-loop]] 모든 모델은 [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)이므로 일반적인 훈련 루프에서 사용할 수 있습니다. 직접 훈련 루프를 작성할 수도 있지만, 🤗 Transformers는 PyTorch를 위한 [`Trainer`] 클래스를 제공합니다. 이 클래스에는 기본 훈련 루프가 포함되어 있으며 분산 훈련, 혼합 정밀도 등과 같은 기능을 추가로 제공합니다. 과업에 따라 다르지만 일반적으로 [`Trainer`]에 다음 매개변수를 전달합니다: 1. [`PreTrainedModel`] 또는 [`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)로 시작합니다: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`]는 학습률, 배치 크기, 훈련할 에포크 수와 같은 모델 하이퍼파라미터를 포함합니다. 훈련 인자를 지정하지 않으면 기본값이 사용됩니다: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. 토크나이저, 이미지 프로세서, 특징 추출기(feature extractor) 또는 프로세서와 전처리 클래스를 로드하세요: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. 데이터셋을 로드하세요: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. 데이터셋을 토큰화하는 함수를 생성하세요: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` 그리고 [`~datasets.Dataset.map`]로 데이터셋 전체에 적용하세요: ```py >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. [`DataCollatorWithPadding`]을 사용하여 데이터셋의 표본 묶음을 만드세요: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` 이제 위의 모든 클래스를 [`Trainer`]로 모으세요: ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... tokenizer=tokenizer, ... data_collator=data_collator, ... 
) # doctest: +SKIP ``` 준비가 되었으면 [`~Trainer.train`]을 호출하여 훈련을 시작하세요: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> 번역이나 요약과 같이 시퀀스-시퀀스 모델을 사용하는 과업에는 [`Seq2SeqTrainer`] 및 [`Seq2SeqTrainingArguments`] 클래스를 사용하세요. </Tip> [`Trainer`] 내의 메서드를 서브클래스화하여 훈련 루프를 바꿀 수도 있습니다. 이러면 손실 함수, 옵티마이저, 스케줄러와 같은 기능 또한 바꿀 수 있게 됩니다. 변경 가능한 메소드에 대해서는 [`Trainer`] 문서를 참고하세요. 훈련 루프를 수정하는 다른 방법은 [Callbacks](./main_classes/callback)를 사용하는 것입니다. Callbacks로 다른 라이브러리와 통합하고, 훈련 루프를 체크하여 진행 상황을 보고받거나, 훈련을 조기에 중단할 수 있습니다. Callbacks은 훈련 루프 자체를 바꾸지는 않습니다. 손실 함수와 같은 것을 바꾸려면 [`Trainer`]를 서브클래스화해야 합니다. ## TensorFlow로 훈련시키기 [[train-with-tensorflow]] 모든 모델은 [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)이므로 [Keras](https://keras.io/) API를 통해 TensorFlow에서 훈련시킬 수 있습니다. 🤗 Transformers는 데이터셋을 쉽게 `tf.data.Dataset` 형태로 쉽게 로드할 수 있는 [`~TFPreTrainedModel.prepare_tf_dataset`] 메소드를 제공하기 때문에, Keras의 [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) 및 [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) 메소드로 바로 훈련을 시작할 수 있습니다. 1. [`TFPreTrainedModel`] 또는 [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)로 시작합니다: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. 토크나이저, 이미지 프로세서, 특징 추출기(feature extractor) 또는 프로세서와 같은 전처리 클래스를 로드하세요: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. 데이터셋을 토큰화하는 함수를 생성하세요: ```py >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. [`~datasets.Dataset.map`]을 사용하여 전체 데이터셋에 토큰화 함수를 적용하고, 데이터셋과 토크나이저를 [`~TFPreTrainedModel.prepare_tf_dataset`]에 전달하세요. 배치 크기를 변경하거나 데이터셋을 섞을 수도 있습니다: ```py >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. 준비되었으면 `compile` 및 `fit`를 호출하여 훈련을 시작하세요. 🤗 Transformers의 모든 모델은 과업과 관련된 기본 손실 함수를 가지고 있으므로 명시적으로 지정하지 않아도 됩니다: ```py >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) # No loss argument! >>> model.fit(tf_dataset) # doctest: +SKIP ``` ## 다음 단계는 무엇인가요? [[whats-next]] 🤗 Transformers 둘러보기를 모두 읽으셨다면, 가이드를 살펴보고 더 구체적인 것을 수행하는 방법을 알아보세요. 이를테면 커스텀 모델 구축하는 방법, 과업에 알맞게 모델을 미세조정하는 방법, 스크립트로 모델 훈련하는 방법 등이 있습니다. 🤗 Transformers 핵심 개념에 대해 더 알아보려면 커피 한 잔 들고 개념 가이드를 살펴보세요!
transformers/docs/source/ko/quicktour.md/0
{ "file_path": "transformers/docs/source/ko/quicktour.md", "repo_id": "transformers", "token_count": 17530 }
305
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 마스킹된 언어 모델링(Masked language modeling)[[masked-language-modeling]] [[open-in-colab]] <Youtube id="mqElG5QJWUg"/> 마스킹된 언어 모델링은 시퀀스에서 마스킹된 토큰을 예측하며, 모델은 양방향으로 토큰에 액세스할 수 있습니다. 즉, 모델은 토큰의 왼쪽과 오른쪽 양쪽에서 접근할 수 있습니다. 마스킹된 언어 모델링은 전체 시퀀스에 대한 문맥적 이해가 필요한 작업에 적합하며, BERT가 그 예에 해당합니다. 이번 가이드에서 다룰 내용은 다음과 같습니다: 1. [ELI5](https://huggingface.co/datasets/eli5) 데이터 세트에서 [r/askscience](https://www.reddit.com/r/askscience/) 부분을 사용해 [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) 모델을 미세 조정합니다. 2. 추론 시에 직접 미세 조정한 모델을 사용합니다. <Tip> 이 작업과 호환되는 모든 아키텍처와 체크포인트를 보려면 [작업 페이지](https://huggingface.co/tasks/fill-mask)를 확인하는 것이 좋습니다. </Tip> 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install transformers datasets evaluate ``` Hugging Face 계정에 로그인하여 모델을 업로드하고 커뮤니티와의 공유를 권장합니다. 메시지가 표시되면(When prompted) 토큰을 입력하여 로그인합니다: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## ELI5 데이터 세트 가져오기[[load-eli5-dataset]] 먼저 🤗 Datasets 라이브러리에서 ELI5 데이터 세트의 r/askscience 중 일부만 가져옵니다. 이렇게 하면 전체 데이터 세트 학습에 더 많은 시간을 할애하기 전에 모든 것이 작동하는지 실험하고 확인할 수 있습니다. ```py >>> from datasets import load_dataset >>> eli5 = load_dataset("eli5", split="train_asks[:5000]") ``` 데이터 세트의 `train_asks`를 [`~datasets.Dataset.train_test_split`] 메소드를 사용해 훈련 데이터와 테스트 데이터로 분할합니다: ```py >>> eli5 = eli5.train_test_split(test_size=0.2) ``` 그리고 아래 예시를 살펴보세요: ```py >>> eli5["train"][0] {'answers': {'a_id': ['c3d1aib', 'c3d4lya'], 'score': [6, 3], 'text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"]}, 'answers_urls': {'url': []}, 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? 
And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls': {'url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg']}, 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls': {'url': []}} ``` 많아 보일 수 있지만 실제로는 `text` 필드에만 집중하면 됩나다. 언어 모델링 작업의 멋진 점은 (비지도 학습으로) *다음 단어가 레이블*이기 때문에 레이블이 따로 필요하지 않습니다. ## 전처리[[preprocess]] <Youtube id="8PmhEIXhBvI"/> 마스킹된 언어 모델링을 위해, 다음 단계로 DistilRoBERTa 토크나이저를 가져와서 `text` 하위 필드를 처리합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base") ``` 위의 예제에서와 마찬가지로, `text` 필드는 `answers` 안에 중첩되어 있습니다. 따라서 중첩된 구조에서 [`flatten`](https://huggingface.co/docs/datasets/process#flatten) 메소드를 사용하여 `text` 하위 필드를 추출합니다: ```py >>> eli5 = eli5.flatten() >>> eli5["train"][0] {'answers.a_id': ['c3d1aib', 'c3d4lya'], 'answers.score': [6, 3], 'answers.text': ["The velocity needed to remain in orbit is equal to the square root of Newton's constant times the mass of earth divided by the distance from the center of the earth. I don't know the altitude of that specific mission, but they're usually around 300 km. That means he's going 7-8 km/s.\n\nIn space there are no other forces acting on either the shuttle or the guy, so they stay in the same position relative to each other. If he were to become unable to return to the ship, he would presumably run out of oxygen, or slowly fall into the atmosphere and burn up.", "Hope you don't mind me asking another question, but why aren't there any stars visible in this photo?"], 'answers_urls.url': [], 'document': '', 'q_id': 'nyxfp', 'selftext': '_URL_0_\n\nThis was on the front page earlier and I have a few questions about it. Is it possible to calculate how fast the astronaut would be orbiting the earth? Also how does he stay close to the shuttle so that he can return safely, i.e is he orbiting at the same speed and can therefore stay next to it? And finally if his propulsion system failed, would he eventually re-enter the atmosphere and presumably die?', 'selftext_urls.url': ['http://apod.nasa.gov/apod/image/1201/freeflyer_nasa_3000.jpg'], 'subreddit': 'askscience', 'title': 'Few questions about this space walk photograph.', 'title_urls.url': []} ``` 이제 각 하위 필드는 `answers` 접두사(prefix)로 표시된 대로 별도의 열이 되고, `text` 필드는 이제 리스트가 되었습니다. 각 문장을 개별적으로 토큰화하는 대신 리스트를 문자열로 변환하여 한번에 토큰화할 수 있습니다. 다음은 각 예제에 대해 문자열로 이루어진 리스트를 `join`하고 결과를 토큰화하는 첫 번째 전처리 함수입니다: ```py >>> def preprocess_function(examples): ... return tokenizer([" ".join(x) for x in examples["answers.text"]]) ``` 이 전처리 함수를 전체 데이터 세트에 적용하기 위해 🤗 Datasets [`~datasets.Dataset.map`] 메소드를 사용합니다. 데이터 세트의 여러 요소를 한 번에 처리하도록 `batched=True`를 설정하고 `num_proc`로 처리 횟수를 늘리면 `map` 함수의 속도를 높일 수 있습니다. 필요하지 않은 열은 제거합니다: ```py >>> tokenized_eli5 = eli5.map( ... preprocess_function, ... batched=True, ... num_proc=4, ... remove_columns=eli5["train"].column_names, ... ) ``` 이 데이터 세트에는 토큰 시퀀스가 포함되어 있지만 이 중 일부는 모델의 최대 입력 길이보다 깁니다. 이제 두 번째 전처리 함수를 사용해 - 모든 시퀀스를 연결하고 - 연결된 시퀀스를 정의한 `block_size` 보다 더 짧은 덩어리로 분할하는데, 이 덩어리는 모델의 최대 입력 길이보다 짧고 GPU RAM이 수용할 수 있는 길이여야 합니다. ```py >>> block_size = 128 >>> def group_texts(examples): ... # Concatenate all texts. ... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} ... total_length = len(concatenated_examples[list(examples.keys())[0]]) ... # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can ... 
...     # customize this part to your needs.
...     if total_length >= block_size:
...         total_length = (total_length // block_size) * block_size
...     # Split by chunks of block_size.
...     result = {
...         k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
...         for k, t in concatenated_examples.items()
...     }
...     result["labels"] = result["input_ids"].copy()
...     return result
```

전체 데이터 세트에 `group_texts` 함수를 적용합니다:

```py
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
```

이제 [`DataCollatorForLanguageModeling`]을 사용하여 데이터 예제의 배치를 생성합니다. 데이터 세트 전체를 최대 길이로 패딩하는 것보다 collation 단계에서 매 배치 안에서의 최대 길이로 문장을 *동적으로 패딩*하는 것이 더 효율적입니다.

<frameworkcontent>
<pt>
시퀀스 끝 토큰을 패딩 토큰으로 사용하고 데이터를 반복할 때마다 토큰을 무작위로 마스킹하도록 `mlm_probability`를 지정합니다:

```py
>>> from transformers import DataCollatorForLanguageModeling

>>> tokenizer.pad_token = tokenizer.eos_token
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
```
</pt>
<tf>
시퀀스 끝 토큰을 패딩 토큰으로 사용하고 데이터를 반복할 때마다 토큰을 무작위로 마스킹하도록 `mlm_probability`를 지정합니다:

```py
>>> from transformers import DataCollatorForLanguageModeling

>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf")
```
</tf>
</frameworkcontent>

## 훈련[[train]]

<frameworkcontent>
<pt>
<Tip>

[`Trainer`]로 모델을 미세 조정하는 데 익숙하지 않다면 기본 튜토리얼 [여기](../training#train-with-pytorch-trainer)를 살펴보세요!

</Tip>

이제 모델 훈련을 시작할 준비가 되었습니다! [`AutoModelForMaskedLM`]를 사용해 DistilRoBERTa 모델을 가져옵니다:

```py
>>> from transformers import AutoModelForMaskedLM

>>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
```

이제 세 단계가 남았습니다:

1. [`TrainingArguments`]의 훈련 하이퍼파라미터를 정의합니다. 모델 저장 위치를 지정하는 `output_dir`은 유일한 필수 파라미터입니다. `push_to_hub=True`를 설정하여 이 모델을 Hub에 업로드합니다 (모델을 업로드하려면 Hugging Face에 로그인해야 합니다).
2. 모델, 데이터 세트 및 데이터 콜레이터(collator)와 함께 훈련 인수를 [`Trainer`]에 전달합니다.
3. [`~Trainer.train`]을 호출하여 모델을 미세 조정합니다.

```py
>>> from transformers import TrainingArguments, Trainer

>>> training_args = TrainingArguments(
...     output_dir="my_awesome_eli5_mlm_model",
...     eval_strategy="epoch",
...     learning_rate=2e-5,
...     num_train_epochs=3,
...     weight_decay=0.01,
...     push_to_hub=True,
... )

>>> trainer = Trainer(
...     model=model,
...     args=training_args,
...     train_dataset=lm_dataset["train"],
...     eval_dataset=lm_dataset["test"],
...     data_collator=data_collator,
... )

>>> trainer.train()
```

훈련이 완료되면 [`~transformers.Trainer.evaluate`] 메소드를 사용하여 펄플렉서티(perplexity)를 계산하고 모델을 평가합니다:

```py
>>> import math

>>> eval_results = trainer.evaluate()
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
Perplexity: 8.76
```

그리고 [`~transformers.Trainer.push_to_hub`] 메소드를 사용해 다른 사람들이 사용할 수 있도록, Hub로 모델을 업로드합니다.

```py
>>> trainer.push_to_hub()
```
</pt>
<tf>
<Tip>

Keras로 모델을 미세 조정하는 데 익숙하지 않다면 기본 튜토리얼 [여기](../training#train-a-tensorflow-model-with-keras)를 살펴보세요!

</Tip>

TensorFlow로 모델을 미세 조정하기 위해서는 옵티마이저(optimizer) 함수 설정, 학습률(learning rate) 스케줄링, 훈련 하이퍼파라미터 설정부터 시작하세요:

```py
>>> from transformers import create_optimizer, AdamWeightDecay

>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
```

다음으로 [`TFAutoModelForMaskedLM`]를 사용해 DistilRoBERTa 모델을 가져옵니다:

```py
>>> from transformers import TFAutoModelForMaskedLM

>>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
```

[`~transformers.TFPreTrainedModel.prepare_tf_dataset`] 메소드를 사용해 데이터 세트를 `tf.data.Dataset` 형식으로 변환하세요:

```py
>>> tf_train_set = model.prepare_tf_dataset(
...     lm_dataset["train"],
...     shuffle=True,
...     batch_size=16,
...     collate_fn=data_collator,
...
) >>> tf_test_set = model.prepare_tf_dataset( ... lm_dataset["test"], ... shuffle=False, ... batch_size=16, ... collate_fn=data_collator, ... ) ``` [`compile`](https://keras.io/api/models/model_training_apis/#compile-method) 메소드를 통해 모델 훈련을 구성합니다: ```py >>> import tensorflow as tf >>> model.compile(optimizer=optimizer) ``` 이는 업로드할 모델과 토크나이저의 위치를 [`~transformers.PushToHubCallback`]에 지정하여 수행할 수 있습니다: ```py >>> from transformers.keras_callbacks import PushToHubCallback >>> callback = PushToHubCallback( ... output_dir="my_awesome_eli5_mlm_model", ... tokenizer=tokenizer, ... ) ``` 드디어 모델을 훈련할 준비가 되었습니다! 모델을 미세 조정할 때 훈련 및 검증 데이터 세트, 에포크 수, 콜백이 포함된 [`fit`](https://keras.io/api/models/model_training_apis/#fit-method)을 호출합니다: ```py >>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback]) ``` 훈련이 완료되면, 자동으로 Hub로 업로드되어 누구나 사용할 수 있습니다! </tf> </frameworkcontent> <Tip> 마스킹된 언어 모델링을 위해 모델을 미세 조정하는 방법에 대한 보다 심층적인 예제는 [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb) 또는 [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)을 참조하세요. </Tip> ## 추론[[inference]] 지금까지 모델 미세 조정을 잘 했으니, 추론에 사용할 수 있습니다! 모델이 빈칸을 채울 텍스트를 스페셜 토큰(special token)인 `<mask>` 토큰으로 표시합니다: ```py >>> text = "The Milky Way is a <mask> galaxy." ``` 추론을 위해 미세 조정한 모델을 테스트하는 가장 간단한 방법은 [`pipeline`]에서 사용하는 것입니다. `fill-mask`태스크로 `pipeline`을 인스턴스화하고 텍스트를 전달합니다. `top_k` 매개변수를 사용하여 반환하는 예측의 수를 지정할 수 있습니다: ```py >>> from transformers import pipeline >>> mask_filler = pipeline("fill-mask", "stevhliu/my_awesome_eli5_mlm_model") >>> mask_filler(text, top_k=3) [{'score': 0.5150994658470154, 'token': 21300, 'token_str': ' spiral', 'sequence': 'The Milky Way is a spiral galaxy.'}, {'score': 0.07087188959121704, 'token': 2232, 'token_str': ' massive', 'sequence': 'The Milky Way is a massive galaxy.'}, {'score': 0.06434620916843414, 'token': 650, 'token_str': ' small', 'sequence': 'The Milky Way is a small galaxy.'}] ``` <frameworkcontent> <pt> 텍스트를 토큰화하고 `input_ids`를 PyTorch 텐서 형태로 반환합니다. 또한, `<mask>` 토큰의 위치를 지정해야 합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_eli5_mlm_model") >>> inputs = tokenizer(text, return_tensors="pt") >>> mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1] ``` 모델에 `inputs`를 입력하고, 마스킹된 토큰의 `logits`를 반환합니다: ```py >>> from transformers import AutoModelForMaskedLM >>> model = AutoModelForMaskedLM.from_pretrained("stevhliu/my_awesome_eli5_mlm_model") >>> logits = model(**inputs).logits >>> mask_token_logits = logits[0, mask_token_index, :] ``` 그런 다음 가장 높은 확률은 가진 마스크 토큰 3개를 반환하고, 출력합니다: ```py >>> top_3_tokens = torch.topk(mask_token_logits, 3, dim=1).indices[0].tolist() >>> for token in top_3_tokens: ... print(text.replace(tokenizer.mask_token, tokenizer.decode([token]))) The Milky Way is a spiral galaxy. The Milky Way is a massive galaxy. The Milky Way is a small galaxy. ``` </pt> <tf> 텍스트를 토큰화하고 `input_ids`를 TensorFlow 텐서 형태로 반환합니다. 
또한, `<mask>` 토큰의 위치를 지정해야 합니다: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_eli5_mlm_model") >>> inputs = tokenizer(text, return_tensors="tf") >>> mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1] ``` 모델에 `inputs`를 입력하고, 마스킹된 토큰의 `logits`를 반환합니다: ```py >>> from transformers import TFAutoModelForMaskedLM >>> model = TFAutoModelForMaskedLM.from_pretrained("stevhliu/my_awesome_eli5_mlm_model") >>> logits = model(**inputs).logits >>> mask_token_logits = logits[0, mask_token_index, :] ``` 그런 다음 가장 높은 확률은 가진 마스크 토큰 3개를 반환하고, 출력합니다: ```py >>> top_3_tokens = tf.math.top_k(mask_token_logits, 3).indices.numpy() >>> for token in top_3_tokens: ... print(text.replace(tokenizer.mask_token, tokenizer.decode([token]))) The Milky Way is a spiral galaxy. The Milky Way is a massive galaxy. The Milky Way is a small galaxy. ``` </tf> </frameworkcontent>
transformers/docs/source/ko/tasks/masked_language_modeling.md/0
{ "file_path": "transformers/docs/source/ko/tasks/masked_language_modeling.md", "repo_id": "transformers", "token_count": 10086 }
306
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 테스트[[testing]] 먼저 🤗 Transformers 모델이 어떻게 테스트되는지 살펴보고, 새로운 테스트를 작성 및 기존 테스트를 개선하는 방법을 알아봅시다. 이 저장소에는 2개의 테스트 스위트가 있습니다: 1. `tests` - 일반 API에 대한 테스트 2. `examples` - API의 일부가 아닌 다양한 응용 프로그램에 대한 테스트 ## Transformers 테스트 방법[[how-transformers-are-tested]] 1. PR이 제출되면 9개의 CircleCi 작업으로 테스트가 진행됩니다. 해당 PR에 대해 새로운 커밋이 생성될 때마다 테스트는 다시 진행됩니다. 이 작업들은 이 [config 파일](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml)에 정의되어 있으므로 필요하다면 사용자의 로컬 환경에서 동일하게 재현해 볼 수 있습니다. 이 CI 작업은 `@slow` 테스트를 실행하지 않습니다. 2. [github actions](https://github.com/huggingface/transformers/actions)에 의해 실행되는 작업은 3개입니다: - [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml): torch hub integration이 작동하는지 확인합니다. - [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): `main` 브랜치에서 커밋이 업데이트된 경우에만 GPU를 이용한 빠른 테스트를 실행합니다. 이는 `src`, `tests`, `.github` 폴더 중 하나에 코드가 업데이트된 경우에만 실행됩니다. (model card, notebook, 기타 등등을 추가한 경우 실행되지 않도록 하기 위해서입니다) - [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): `tests` 및 `examples`에서 GPU를 이용한 일반 테스트, 느린 테스트를 실행합니다. ```bash RUN_SLOW=1 pytest tests/ RUN_SLOW=1 pytest examples/ ``` 결과는 [여기](https://github.com/huggingface/transformers/actions)에서 확인할 수 있습니다. ## 테스트 실행[[running-tests]] ### 실행할 테스트 선택[[choosing-which-tests-to-run]] 이 문서는 테스트를 실행하는 다양한 방법에 대해 자세히 설명합니다. 모든 내용을 읽은 후에도, 더 자세한 내용이 필요하다면 [여기](https://docs.pytest.org/en/latest/usage.html)에서 확인할 수 있습니다. 다음은 가장 유용한 테스트 실행 방법 몇 가지입니다. 모두 실행: ```console pytest ``` 또는: ```bash make test ``` 후자는 다음과 같이 정의됩니다: ```bash python -m pytest -n auto --dist=loadfile -s -v ./tests/ ``` 위의 명령어는 pytest에게 아래의 내용을 전달합니다: - 사용 가능한 CPU 코어 수만큼 테스트 프로세스를 실행합니다. (RAM이 충분하지 않다면, 테스트 프로세스 수가 너무 많을 수 있습니다!) - 동일한 파일의 모든 테스트는 동일한 테스트 프로세스에서 실행되어야 합니다. - 출력을 캡처하지 않습니다. - 자세한 모드로 실행합니다. ### 모든 테스트 목록 가져오기[[getting-the-list-of-all-tests]] 테스트 스위트의 모든 테스트: ```bash pytest --collect-only -q ``` 지정된 테스트 파일의 모든 테스트: ```bash pytest tests/test_optimization.py --collect-only -q ``` ### 특정 테스트 모듈 실행[[run-a-specific-test-module]] 개별 테스트 모듈 실행하기: ```bash pytest tests/utils/test_logging.py ``` ### 특정 테스트 실행[[run-specific-tests]] 대부분의 테스트 내부에서는 unittest가 사용됩니다. 따라서 특정 하위 테스트를 실행하려면 해당 테스트를 포함하는 unittest 클래스의 이름을 알아야 합니다. 예를 들어 다음과 같을 수 있습니다: ```bash pytest tests/test_optimization.py::OptimizationTest::test_adam_w ``` 위의 명령어의 의미는 다음과 같습니다: - `tests/test_optimization.py` - 테스트가 있는 파일 - `OptimizationTest` - 클래스의 이름 - `test_adam_w` - 특정 테스트 함수의 이름 파일에 여러 클래스가 포함된 경우, 특정 클래스의 테스트만 실행할 수도 있습니다. 예를 들어 다음과 같습니다: ```bash pytest tests/test_optimization.py::OptimizationTest ``` 이 명령어는 해당 클래스 내부의 모든 테스트를 실행합니다. 앞에서 언급한 것처럼 `OptimizationTest` 클래스에 포함된 테스트를 확인할 수 있습니다. 
```bash
pytest tests/test_optimization.py::OptimizationTest --collect-only -q
```

키워드 표현식을 사용하여 테스트를 실행할 수도 있습니다. `adam`이라는 이름을 포함하는 테스트만 실행하려면 다음과 같습니다:

```bash
pytest -k adam tests/test_optimization.py
```

논리 연산자 `and`와 `or`를 사용하여 모든 키워드가 일치해야 하는지 또는 어느 하나가 일치해야 하는지를 나타낼 수 있습니다. `not`은 부정할 때 사용할 수 있습니다.

`adam`이라는 이름을 포함하지 않는 모든 테스트를 실행하려면 다음과 같습니다:

```bash
pytest -k "not adam" tests/test_optimization.py
```

두 가지 패턴을 하나로 결합할 수도 있습니다:

```bash
pytest -k "ada and not adam" tests/test_optimization.py
```

예를 들어 `test_adafactor`와 `test_adam_w`를 모두 실행하려면 다음을 사용할 수 있습니다:

```bash
pytest -k "test_adafactor or test_adam_w" tests/test_optimization.py
```

여기서 `or`를 사용하는 것에 유의하세요. 두 키워드 중 하나가 일치하도록 하기 위한 목적으로 사용하기 때문입니다.

두 패턴이 모두 포함되어야 하는 테스트만 실행하려면, `and`를 사용해야 합니다:

```bash
pytest -k "test and ada" tests/test_optimization.py
```

### `accelerate` 테스트 실행[[run-`accelerate`-tests]]

모델에서 `accelerate` 테스트를 실행해야 할 때가 있습니다. 이를 위해서는 명령어에 `-m accelerate_tests`를 추가하면 됩니다. 예를 들어, `OPT`에서 이러한 테스트를 실행하려면 다음과 같습니다:

```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```

### 문서 테스트 실행[[run-documentation-tests]]

예시 문서가 올바른지 테스트하려면 `doctests`가 통과하는지 확인해야 합니다. 예를 들어, [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035)를 사용해 봅시다:

```python
r"""
Returns:

Example:
    ```python
    >>> import torch
    >>> from transformers import WhisperModel, WhisperFeatureExtractor
    >>> from datasets import load_dataset

    >>> model = WhisperModel.from_pretrained("openai/whisper-base")
    >>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    >>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
    >>> input_features = inputs.input_features
    >>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
    >>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
    >>> list(last_hidden_state.shape)
    [1, 2, 512]
    ```"""
```

원하는 파일의 모든 docstring 예제를 자동으로 테스트하려면 다음 명령을 실행하면 됩니다:

```bash
pytest --doctest-modules <path_to_file_or_dir>
```

파일의 확장자가 markdown인 경우 `--doctest-glob="*.md"` 인수를 추가해야 합니다.

### 수정된 테스트만 실행[[run-only-modified-tests]]

수정된 파일 또는 현재 브랜치 (Git 기준)와 관련된 테스트를 실행하려면 [pytest-picked](https://github.com/anapaulagomes/pytest-picked)을 사용할 수 있습니다. 이는 변경한 내용이 테스트에 영향을 주지 않았는지 빠르게 확인할 수 있는 좋은 방법입니다.

```bash
pip install pytest-picked
```

```bash
pytest --picked
```

수정되었지만, 아직 커밋되지 않은 모든 파일 및 폴더에서 테스트가 실행됩니다.

### 소스 수정 시 실패한 테스트 자동 재실행[[automatically-rerun-failed-tests-on-source-modification]]

[pytest-xdist](https://github.com/pytest-dev/pytest-xdist)는 모든 실패한 테스트를 감지하고, 파일을 수정한 후에 파일을 계속 재실행하여 테스트가 성공할 때까지 기다리는 매우 유용한 기능을 제공합니다. 따라서 수정한 내용을 확인한 후 pytest를 다시 시작할 필요가 없습니다. 모든 테스트가 통과될 때까지 이 과정을 반복한 후 다시 전체 실행이 이루어집니다.

```bash
pip install pytest-xdist
```

재귀적 모드의 사용: `pytest -f` 또는 `pytest --looponfail`

파일의 변경 사항은 `looponfailroots` 루트 디렉터리와 해당 내용을 (재귀적으로) 확인하여 감지됩니다. 이 값의 기본값이 작동하지 않는 경우, `setup.cfg`의 설정 옵션을 변경하여 프로젝트에 맞게 조정할 수 있습니다:

```ini
[tool:pytest]
looponfailroots = transformers tests
```

또는 `pytest.ini`/`tox.ini` 파일:

```ini
[pytest]
looponfailroots = transformers tests
```

이렇게 하면 ini-file의 디렉터리를 기준으로 상대적으로 지정된 각 디렉터리에서 파일 변경 사항만 찾게 됩니다.

이 기능을 대체할 수 있는 구현 방법인 [pytest-watch](https://github.com/joeyespo/pytest-watch)도 있습니다.

### 특정 테스트 모듈 건너뛰기[[skip-a-test-module]]

모든 테스트 모듈을 실행하되 특정 모듈을 제외하려면, 실행할 테스트 목록을 명시적으로 지정할 수 있습니다.
예를 들어, `test_modeling_*.py` 테스트를 제외한 모든 테스트를 실행하려면 다음을 사용할 수 있습니다:

```bash
pytest $(ls -1 tests/*py | grep -v test_modeling)
```

### 상태 초기화[[clearing-state]]

CI 빌드 및 (속도에 대한) 격리가 중요한 경우, 캐시를 지워야 합니다:

```bash
pytest --cache-clear tests
```

### 테스트를 병렬로 실행[[running-tests-in-parallel]]

이전에 언급한 것처럼 `make test`는 테스트를 병렬로 실행하기 위해 `pytest-xdist` 플러그인(`-n X` 인수, 예를 들어 `-n 2`를 사용하여 2개의 병렬 작업 실행)을 통해 실행됩니다.

`pytest-xdist`의 `--dist=` 옵션을 사용하여 테스트를 어떻게 그룹화할지 제어할 수 있습니다. `--dist=loadfile`은 하나의 파일에 있는 테스트를 동일한 프로세스로 그룹화합니다.

실행된 테스트의 순서가 다르고 예측할 수 없기 때문에, `pytest-xdist`로 테스트 스위트를 실행하면 실패가 발생할 수 있습니다 (검출되지 않은 결합된 테스트가 있는 경우). 이 경우 [pytest-replay](https://github.com/ESSS/pytest-replay)를 사용하면 동일한 순서로 테스트를 다시 실행해서 실패하는 시퀀스를 최소화하는 데에 도움이 됩니다.

### 테스트 순서와 반복[[test-order-and-repetition]]

잠재적인 종속성 및 상태 관련 버그(tear down)를 감지하기 위해 테스트를 여러 번, 연속으로, 무작위로 또는 세트로 반복하는 것이 좋습니다. 그리고 직접적인 여러 번의 반복은 DL의 무작위성에 의해 발견되는 일부 문제를 감지하는 데에도 유용합니다.

#### 테스트를 반복[[repeat-tests]]

- [pytest-flakefinder](https://github.com/dropbox/pytest-flakefinder):

```bash
pip install pytest-flakefinder
```

모든 테스트를 여러 번 실행합니다(기본값은 50번):

```bash
pytest --flake-finder --flake-runs=5 tests/test_failing_test.py
```

<Tip>

이 플러그인은 `pytest-xdist`의 `-n` 플래그와 함께 작동하지 않습니다.

</Tip>

<Tip>

`pytest-repeat`라는 또 다른 플러그인도 있지만 `unittest`와 함께 작동하지 않습니다.

</Tip>

#### 테스트를 임의의 순서로 실행[[run-tests-in-a-random-order]]

```bash
pip install pytest-random-order
```

중요: `pytest-random-order`가 설치되면 테스트가 자동으로 임의의 순서로 섞입니다. 구성 변경이나 커맨드 라인 옵션이 필요하지 않습니다.

앞서 설명한 것처럼 이를 통해 한 테스트의 상태가 다른 테스트의 상태에 영향을 미치는 결합된 테스트를 감지할 수 있습니다. `pytest-random-order`가 설치되면 해당 세션에서 사용된 랜덤 시드가 출력되며 예를 들어 다음과 같습니다:

```bash
pytest tests
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```

따라서 특정 시퀀스가 실패하는 경우에는 정확한 시드를 추가하여 재현할 수 있습니다. 예를 들어 다음과 같습니다:

```bash
pytest --random-order-seed=573663
[...]
Using --random-order-bucket=module
Using --random-order-seed=573663
```

정확히 동일한 테스트 목록(또는 목록이 없음)을 사용하는 경우에만 정확한 순서를 재현합니다. 목록을 수동으로 좁히기 시작하면 더 이상 시드에 의존할 수 없고 실패했던 정확한 순서로 수동으로 목록을 나열해야 합니다. 그리고 `--random-order-bucket=none`을 사용하여 pytest에게 순서를 임의로 설정하지 않도록 알려야 합니다. 예를 들어 다음과 같습니다:

```bash
pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.py
```

모든 테스트에 대해 섞기를 비활성화하려면 다음과 같습니다:

```bash
pytest --random-order-bucket=none
```

기본적으로 `--random-order-bucket=module`이 내재되어 있으므로, 모듈 수준에서 파일을 섞습니다. 또한 `class`, `package`, `global` 및 `none` 수준에서도 섞을 수 있습니다. 자세한 내용은 해당 [문서](https://github.com/jbasko/pytest-random-order)를 참조하세요.

또 다른 무작위화의 대안은 [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly)입니다. 이 모듈은 매우 유사한 기능/인터페이스를 가지고 있지만, `pytest-random-order`에 있는 버킷 모드를 사용할 수는 없습니다. 설치하면 자동으로 적용된다는 문제도 동일하게 가지고 있습니다.

### 외관과 느낌을 변경[[look-and-feel-variations]]

#### pytest-sugar 사용[[pytest-sugar]]

[pytest-sugar](https://github.com/Frozenball/pytest-sugar)는 테스트가 보여지는 형태를 개선하고, 진행 상황 바를 추가하며, 실패한 테스트와 검증을 즉시 표시하는 플러그인입니다. 설치하면 자동으로 활성화됩니다.

```bash
pip install pytest-sugar
```

pytest-sugar 없이 테스트를 실행하려면 다음과 같습니다:

```bash
pytest -p no:sugar
```

또는 pytest-sugar를 제거하세요.

#### 각 하위 테스트 이름과 진행 상황 보고[[report-each-sub-test-name-and-its-progress]]

`pytest`를 통해 단일 또는 그룹의 테스트를 실행하는 경우(`pip install pytest-pspec` 이후):

```bash
pytest --pspec tests/test_optimization.py
```

#### 실패한 테스트 즉시 표시[[instantly-shows-failed-tests]]

[pytest-instafail](https://github.com/pytest-dev/pytest-instafail)은 테스트 세션의 끝까지 기다리지 않고 실패 및 오류를 즉시 표시합니다.
```bash
pip install pytest-instafail
```

```bash
pytest --instafail
```

### GPU 사용 여부[[to-GPU-or-not-to-GPU]]

GPU가 활성화된 환경에서, CPU 전용 모드로 테스트하려면 `CUDA_VISIBLE_DEVICES=""`를 추가합니다:

```bash
CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py
```

또는 다중 GPU가 있는 경우 `pytest`에서 사용할 GPU를 지정할 수도 있습니다. 예를 들어, GPU `0` 및 `1`이 있는 경우 다음을 실행할 수 있습니다:

```bash
CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py
```

이렇게 하면 다른 GPU에서 다른 작업을 실행하려는 경우 유용합니다.

일부 테스트는 반드시 CPU 전용으로 실행해야 하며, 일부는 CPU 또는 GPU 또는 TPU에서 실행해야 하고, 일부는 여러 GPU에서 실행해야 합니다. 다음 스킵 데코레이터는 테스트의 요구 사항을 CPU/GPU/TPU별로 설정하는 데 사용됩니다:

- `require_torch` - 이 테스트는 torch에서만 실행됩니다.
- `require_torch_gpu` - `require_torch`에 추가로 적어도 1개의 GPU가 필요합니다.
- `require_torch_multi_gpu` - `require_torch`에 추가로 적어도 2개의 GPU가 필요합니다.
- `require_torch_non_multi_gpu` - `require_torch`에 추가로 0개 또는 1개의 GPU가 필요합니다.
- `require_torch_up_to_2_gpus` - `require_torch`에 추가로 0개, 1개 또는 2개의 GPU가 필요합니다.
- `require_torch_xla` - `require_torch`에 추가로 적어도 1개의 TPU가 필요합니다.

GPU 요구 사항을 표로 정리하면 아래와 같습니다:

| n gpus | decorator                      |
|--------|--------------------------------|
| `>= 0` | `@require_torch`               |
| `>= 1` | `@require_torch_gpu`           |
| `>= 2` | `@require_torch_multi_gpu`     |
| `< 2`  | `@require_torch_non_multi_gpu` |
| `< 3`  | `@require_torch_up_to_2_gpus`  |

예를 들어, 2개 이상의 GPU가 있고 pytorch가 설치되어 있을 때에만 실행되어야 하는 테스트는 다음과 같습니다:

```python no-style
@require_torch_multi_gpu
def test_example_with_multi_gpu():
```

`tensorflow`가 필요한 경우 `require_tf` 데코레이터를 사용합니다. 예를 들어 다음과 같습니다:

```python no-style
@require_tf
def test_tf_thing_with_tensorflow():
```

이러한 데코레이터는 중첩될 수 있습니다. 예를 들어, 느린 테스트로 진행되고 pytorch에서 적어도 하나의 GPU가 필요한 경우 다음과 같이 설정할 수 있습니다:

```python no-style
@require_torch_gpu
@slow
def test_example_slow_on_gpu():
```

`@parametrized`와 같은 일부 데코레이터는 테스트 이름을 다시 작성하기 때문에 `@require_*` 스킵 데코레이터는 올바르게 작동하려면 항상 맨 마지막에 나열되어야 합니다. 다음은 올바른 사용 예입니다:

```python no-style
@parameterized.expand(...)
@require_torch_multi_gpu
def test_integration_foo():
```

`@pytest.mark.parametrize`에는 이러한 순서 문제는 없으므로 처음 혹은 마지막에 위치시킬 수 있고 이러한 경우에도 잘 작동할 것입니다. 하지만 unittest가 아닌 경우에만 작동합니다.

테스트 내부에서 다음을 사용할 수 있습니다:

- 사용 가능한 GPU 수:

```python
from transformers.testing_utils import get_gpu_count

n_gpu = get_gpu_count()  # torch와 tf와 함께 작동
```

### 분산 훈련[[distributed-training]]

`pytest`는 분산 훈련을 직접적으로 다루지 못합니다. 이를 시도하면 하위 프로세스가 올바르게 동작하지 않고, 자신을 `pytest`라고 여겨 테스트 스위트를 반복해서 실행하게 됩니다. 그러나 일반 프로세스를 생성한 다음 여러 워커를 생성하고 IO 파이프를 관리하도록 하면 동작합니다.

다음은 사용 가능한 테스트입니다:

- [test_trainer_distributed.py](https://github.com/huggingface/transformers/tree/main/tests/trainer/test_trainer_distributed.py)
- [test_deepspeed.py](https://github.com/huggingface/transformers/tree/main/tests/deepspeed/test_deepspeed.py)

실행 지점으로 바로 이동하려면, 해당 테스트에서 `execute_subprocess_async` 호출을 검색하세요.

이러한 테스트를 실행하려면 적어도 2개의 GPU가 필요합니다.

```bash
CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py
```

### 출력 캡처[[output-capture]]

테스트 실행 중 `stdout` 및 `stderr`로 전송된 모든 출력이 캡처됩니다. 테스트나 설정 메소드가 실패하면 캡처된 출력은 일반적으로 실패 추적 정보와 함께 표시됩니다.
출력 캡처를 비활성화하고 `stdout` 및 `stderr`를 정상적으로 받으려면 `-s` 또는 `--capture=no`를 사용하세요: ```bash pytest -s tests/utils/test_logging.py ``` 테스트 결과를 JUnit 형식의 출력으로 보내려면 다음을 사용하세요: ```bash py.test tests --junitxml=result.xml ``` ### 색상 조절[[color-control]] 색상이 없게 하려면 다음과 같이 설정하세요(예를 들어 흰색 배경에 노란색 글씨는 가독성이 좋지 않습니다): ```bash pytest --color=no tests/utils/test_logging.py ``` ### online pastebin service에 테스트 보고서 전송[[sending test report to online pastebin service]] 각 테스트 실패에 대한 URL을 만듭니다: ```bash pytest --pastebin=failed tests/utils/test_logging.py ``` 이렇게 하면 각 실패에 대한 URL을 제공하는 remote Paste service에 테스트 실행 정보를 제출합니다. 일반적인 테스트를 선택할 수도 있고 혹은 특정 실패만 보내려면 `-x`와 같이 추가할 수도 있습니다. 전체 테스트 세션 로그에 대한 URL을 생성합니다: ```bash pytest --pastebin=all tests/utils/test_logging.py ``` ## 테스트 작성[[writing-tests]] 🤗 transformers 테스트는 대부분 `unittest`를 기반으로 하지만, `pytest`에서 실행되므로 대부분의 경우 두 시스템의 기능을 사용할 수 있습니다. 지원되는 기능에 대해 [여기](https://docs.pytest.org/en/stable/unittest.html)에서 확인할 수 있지만, 기억해야 할 중요한 점은 대부분의 `pytest` fixture가 작동하지 않는다는 것입니다. 파라미터화도 작동하지 않지만, 우리는 비슷한 방식으로 작동하는 `parameterized` 모듈을 사용합니다. ### 매개변수화[[parametrization]] 동일한 테스트를 다른 인수로 여러 번 실행해야 하는 경우가 종종 있습니다. 테스트 내에서 이 작업을 수행할 수 있지만, 그렇게 하면 하나의 인수 세트에 대해 테스트를 실행할 수 없습니다. ```python # test_this1.py import unittest from parameterized import parameterized class TestMathUnitTest(unittest.TestCase): @parameterized.expand( [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ] ) def test_floor(self, name, input, expected): assert_equal(math.floor(input), expected) ``` 이제 기본적으로 이 테스트는 `test_floor`의 마지막 3개 인수가 매개변수 목록의 해당 인수에 할당되는 것으로 3번 실행될 것입니다. 그리고 `negative` 및 `integer` 매개변수 집합만 실행하려면 다음과 같이 실행할 수 있습니다: ```bash pytest -k "negative and integer" tests/test_mytest.py ``` 또는 `negative` 하위 테스트를 제외한 모든 서브 테스트를 다음과 같이 실행할 수 있습니다: ```bash pytest -k "not negative" tests/test_mytest.py ``` 앞에서 언급한 `-k` 필터를 사용하는 것 외에도, 각 서브 테스트의 정확한 이름을 확인한 후에 일부 혹은 전체 서브 테스트를 실행할 수 있습니다. ```bash pytest test_this1.py --collect-only -q ``` 그리고 다음의 내용을 확인할 수 있을 것입니다: ```bash test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer test_this1.py::TestMathUnitTest::test_floor_2_large_fraction ``` 2개의 특정한 서브 테스트만 실행할 수도 있습니다: ```bash pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer ``` `transformers`의 개발자 종속성에 이미 있는 [parameterized](https://pypi.org/project/parameterized/) 모듈은 `unittests`와 `pytest` 테스트 모두에서 작동합니다. 그러나 테스트가 `unittest`가 아닌 경우 `pytest.mark.parametrize`를 사용할 수 있습니다(이미 있는 일부 테스트에서 사용되는 경우도 있습니다. 주로 `examples` 하위에 있습니다). 다음은 `pytest`의 `parametrize` 마커를 사용한 동일한 예입니다: ```python # test_this2.py import pytest @pytest.mark.parametrize( "name, input, expected", [ ("negative", -1.5, -2.0), ("integer", 1, 1.0), ("large fraction", 1.6, 1), ], ) def test_floor(name, input, expected): assert_equal(math.floor(input), expected) ``` `parameterized`와 마찬가지로 `pytest.mark.parametrize`를 사용하면 `-k` 필터가 작동하지 않는 경우에도 실행할 서브 테스트를 정확하게 지정할 수 있습니다. 단, 이 매개변수화 함수는 서브 테스트의 이름 집합을 약간 다르게 생성합니다. 다음과 같은 모습입니다: ```bash pytest test_this2.py --collect-only -q ``` 그리고 다음의 내용을 확인할 수 있을 것입니다: ```bash test_this2.py::test_floor[integer-1-1.0] test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[large fraction-1.6-1] ``` 특정한 테스트에 대해서만 실행할 수도 있습니다: ```bash pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[integer-1-1.0] ``` 이전의 예시와 같이 실행할 수 있습니다. ### 파일 및 디렉터리[[files-and-directories]] 테스트에서 종종 현재 테스트 파일과 관련된 상대적인 위치를 알아야 하는 경우가 있습니다. 
테스트가 여러 디렉터리에서 호출되거나 깊이가 다른 하위 디렉터리에 있을 수 있기 때문에 그 위치를 아는 것은 간단하지 않습니다. `transformers.testing_utils.TestCasePlus`라는 헬퍼 클래스는 모든 기본 경로를 처리하고 간단한 액세서를 제공하여 이 문제를 해결합니다:

- `pathlib` 객체(완전히 정해진 경로)
  - `test_file_path` - 현재 테스트 파일 경로 (예: `__file__`)
  - `test_file_dir` - 현재 테스트 파일이 포함된 디렉터리
  - `tests_dir` - `tests` 테스트 스위트의 디렉터리
  - `examples_dir` - `examples` 테스트 스위트의 디렉터리
  - `repo_root_dir` - 저장소 디렉터리
  - `src_dir` - `src`의 디렉터리(예: `transformers` 하위 디렉터리가 있는 곳)
- 문자열로 변환된 경로 - 위와 동일하지만, `pathlib` 객체가 아닌 문자열로 경로를 반환합니다:
  - `test_file_path_str`
  - `test_file_dir_str`
  - `tests_dir_str`
  - `examples_dir_str`
  - `repo_root_dir_str`
  - `src_dir_str`

위의 내용을 사용하려면 테스트가 `transformers.testing_utils.TestCasePlus`의 서브클래스에 있는지 확인해야 합니다. 예를 들어 다음과 같습니다:

```python
from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_something_involving_local_locations(self):
        data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
```

만약 `pathlib`를 통해 경로를 조작할 필요가 없거나 경로를 문자열로만 필요로 하는 경우에는 `pathlib` 객체에 `str()`을 호출하거나 `_str`로 끝나는 접근자를 사용할 수 있습니다. 예를 들어 다음과 같습니다:

```python
from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_something_involving_stringified_locations(self):
        examples_dir = self.examples_dir_str
```

### 임시 파일 및 디렉터리[[temporary-files-and-directories]]

고유한 임시 파일 및 디렉터리를 사용하는 것은 병렬 테스트 실행에 있어 필수적입니다. 이렇게 함으로써 테스트들이 서로의 데이터를 덮어쓰지 않게 할 수 있습니다. 또한 우리는 생성된 테스트의 종료 단계에서 이러한 임시 파일 및 디렉터리를 제거하고 싶습니다. 따라서 이러한 요구 사항을 충족시켜주는 `tempfile`과 같은 패키지를 사용하는 것이 중요합니다.

그러나 테스트를 디버깅할 때는 임시 파일이나 디렉터리에 들어가는 내용을 확인할 수 있어야 하며, 재실행되는 각 테스트마다 임시 파일이나 디렉터리의 경로에 대해 무작위 값이 아닌 정확한 값을 알고 싶을 것입니다.

`transformers.testing_utils.TestCasePlus`라는 도우미 클래스는 이러한 목적에 가장 적합합니다. 이 클래스는 `unittest.TestCase`의 하위 클래스이므로, 우리는 이것을 테스트 모듈에서 쉽게 상속할 수 있습니다.

다음은 해당 클래스를 사용하는 예시입니다:

```python
from transformers.testing_utils import TestCasePlus


class ExamplesTests(TestCasePlus):
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
```

이 코드는 고유한 임시 디렉터리를 생성하고 `tmp_dir`을 해당 위치로 설정합니다.

- 고유한 임시 디렉터리를 생성합니다:

```python
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir()
```

`tmp_dir`에는 생성된 임시 디렉터리의 경로가 포함됩니다. 이는 테스트의 종료 단계에서 자동으로 제거됩니다.

- 선택한 경로에 임시 디렉터리를 생성하고, 테스트 시작 전에 비어 있는지 확인하며, 테스트 후에는 비우지 않습니다:

```python
def test_whatever(self):
    tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```

이것은 디버깅할 때 특정 디렉터리를 모니터링하고, 그 디렉터리에 이전에 실행된 테스트가 데이터를 남기지 않도록 하는 데에 유용합니다.

- `before` 및 `after` 인수를 직접 오버라이딩하여 기본 동작을 변경할 수 있으며 다음 중 하나의 동작으로 이어집니다:

  - `before=True`: 테스트 시작 시 임시 디렉터리가 항상 지워집니다.
  - `before=False`: 임시 디렉터리가 이미 존재하는 경우 기존 파일은 그대로 남습니다.
  - `after=True`: 테스트 종료 시 임시 디렉터리가 항상 삭제됩니다.
  - `after=False`: 테스트 종료 시 임시 디렉터리가 항상 그대로 유지됩니다.

<Tip>

`rm -r`에 해당하는 명령을 안전하게 실행하기 위해, 명시적인 `tmp_dir`을 사용하는 경우 프로젝트 저장소 체크 아웃의 하위 디렉터리만 허용됩니다. 따라서 실수로 `/tmp`가 아닌 중요한 파일 시스템의 일부가 삭제되지 않도록 항상 `./`로 시작하는 경로를 전달해야 합니다.

</Tip>

<Tip>

각 테스트는 여러 개의 임시 디렉터리를 등록할 수 있으며, 별도로 요청하지 않는 한 모두 자동으로 제거됩니다.

</Tip>

### 임시 sys.path 오버라이드[[temporary-sys.path-override]]

예를 들어 다른 테스트에서 가져오기(import) 위해 `sys.path`를 임시로 오버라이드해야 하는 경우 `ExtendSysPath` 컨텍스트 관리자를 사용할 수 있습니다. 예를 들어 다음과 같습니다:

```python
import os
from transformers.testing_utils import ExtendSysPath

bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/.."):
    from test_trainer import TrainerIntegrationCommon  # noqa
```

### 테스트 건너뛰기[[skipping-tests]]

이것은 버그가 발견되어 새로운 테스트가 작성되었지만 아직 그 버그가 수정되지 않은 경우에 유용합니다. 이 테스트를 주 저장소에 커밋하려면 `make test` 중에 건너뛰도록 해야 합니다.

방법:

- **skip**은 테스트가 일부 조건이 충족될 경우에만 통과될 것으로 예상되고, 그렇지 않으면 pytest가 전체 테스트를 건너뛰어야 함을 의미합니다.
### Slow tests[[slow-tests]]

The library of tests is ever-growing, and some of the tests take minutes to run, therefore we can't afford waiting for an hour for the test suite to complete on CI. Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below:

```python no-style
from transformers.testing_utils import slow

@slow
def test_integration_foo():
```

To run tests marked `@slow`, set the `RUN_SLOW=1` environment variable, e.g.:

```bash
RUN_SLOW=1 pytest tests
```

Some decorators like `@parameterized` rewrite test names, therefore `@slow` and the rest of the skip decorators `@require_*` have to be listed last for them to work correctly. Here is an example of the correct usage:

```python no-style
@parameterized.expand(...)
@slow
def test_integration_foo():
```

As explained at the beginning of this document, slow tests run on a scheduled basis, rather than in PR CI checks. So it's possible that some problems will be missed during a PR submission and get merged. Such problems will get caught during the next scheduled CI job. But it also means that it's important to run the slow tests on your machine before submitting the PR.

Here is a rough decision making mechanism for choosing whether a test should be marked as slow: if the test is focused on one of the library's internal components (e.g., modeling files, tokenization files, pipelines), then it should run in the non-slow test suite. If it's focused on another aspect of the library, such as the documentation or the examples, then it should run in the slow test suite. And then, to refine this approach, we should have exceptions:

- All tests that need to download a heavy set of weights or a dataset larger than ~50MB (e.g., model integration tests, tokenizer integration tests, pipeline integration tests) should be set to slow. If you're adding a new model, you should create and upload to the hub a tiny version of it (with random weights) for integration tests. This is discussed in the following paragraphs.
- All tests that need to do a training not specifically optimized to be fast should be set to slow.
- We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `@slow`. Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `@slow`.
- If a test completes in under 1 second on CI (including downloads, if any), then it should be a normal (non-slow) test.

Collectively, all the non-slow tests need to cover entirely the different internals, while remaining fast. For example, significant coverage can be achieved by testing with specially created tiny models with random weights. Such models have a very minimal number of layers (e.g., 2), vocab size (e.g., 1000), etc. Then the `@slow` tests can use large slow models to do qualitative testing. To see the use of these, simply look for *tiny* models with:

```bash
grep tiny tests examples
```

Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). You can easily adjust it to your specific model's architecture.
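What such a tiny model boils down to is just a configuration with very small dimensions. For illustration, here is a rough sketch using BERT; the exact sizes are arbitrary, and any architecture's config works the same way:

```python
from transformers import BertConfig, BertModel

# a deliberately tiny configuration: random weights, 2 layers, small vocab
tiny_config = BertConfig(
    vocab_size=1000,
    hidden_size=32,
    num_hidden_layers=2,
    num_attention_heads=2,
    intermediate_size=64,
)
tiny_model = BertModel(tiny_config)  # randomly initialized, fast to build and run
```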
It's easy to measure the run-time incorrectly if, for example, there is an overhead of downloading a huge model, but if you test it locally the downloaded files will be cached and thus the download time not measured. Hence, check the execution speed report in the CI logs instead (the output of `pytest --durations=0 tests`).

That report is also useful for finding slow outliers that aren't marked as such, or which need to be re-written to be fast. If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests.

### Testing the stdout/stderr output[[testing-the-stdout/stderr-output]]

In order to test functions that write to `stdout` and/or `stderr`, the test can access those streams using pytest's [capsys system](https://docs.pytest.org/en/latest/capture.html). Here is how this is accomplished:

```python
import sys


def print_to_stdout(s):
    print(s)


def print_to_stderr(s):
    sys.stderr.write(s)


def test_result_and_stdout(capsys):
    msg = "Hello"
    print_to_stdout(msg)
    print_to_stderr(msg)
    out, err = capsys.readouterr()  # consume the captured output streams
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    sys.stderr.write(err)
    # test:
    assert msg in out
    assert msg in err
```

And, of course, most of the time, `stderr` will come as a part of an exception, so try/except has to be used in such a case:

```python
def raise_exception(msg):
    raise ValueError(msg)


def test_something_exception():
    msg = "Not a good value"
    error = ""
    try:
        raise_exception(msg)
    except Exception as e:
        error = str(e)
        assert msg in error, f"{msg} is in the exception:\n{error}"
```

Another approach to capturing `stdout` is via `contextlib.redirect_stdout`:

```python
from io import StringIO
from contextlib import redirect_stdout


def print_to_stdout(s):
    print(s)


def test_result_and_stdout():
    msg = "Hello"
    buffer = StringIO()
    with redirect_stdout(buffer):
        print_to_stdout(msg)
    out = buffer.getvalue()
    # optional: if you want to replay the consumed streams:
    sys.stdout.write(out)
    # test:
    assert msg in out
```

An important potential issue with capturing `stdout` is that it may contain `\r` characters that in normal `print` reset everything that has been printed so far. There is no problem with `pytest`, but with `pytest -s` these characters get included in the buffer, so to be able to run the test with and without `-s`, you have to do extra cleanup on the captured output, using `re.sub(r'~.*\r', '', buf, 0, re.M)`.

But then we have a helper context manager wrapper that automatically takes care of it all, regardless of whether the output contains `\r`'s or not, so it's as simple as:

```python
from transformers.testing_utils import CaptureStdout

with CaptureStdout() as cs:
    function_that_writes_to_stdout()
print(cs.out)
```

Here is a full test example:

```python
from transformers.testing_utils import CaptureStdout

msg = "Secret message\r"
final = "Hello World"
with CaptureStdout() as cs:
    print(msg + final)
assert cs.out == final + "\n", f"captured: {cs.out}, expecting {final}"
```

If you'd like to capture `stderr`, use the `CaptureStderr` class instead:

```python
from transformers.testing_utils import CaptureStderr

with CaptureStderr() as cs:
    function_that_writes_to_stderr()
print(cs.err)
```

If you need to capture both streams at once, use the parent `CaptureStd` class:

```python
from transformers.testing_utils import CaptureStd

with CaptureStd() as cs:
    function_that_writes_to_stdout_and_stderr()
print(cs.err, cs.out)
```

Also, to aid debugging test issues, by default these context managers automatically replay the captured streams on exit from the context.

### Capturing logger stream[[capturing-logger-stream]]

If you need to validate the output of a logger, you can use `CaptureLogger`:

```python
from transformers import logging
from transformers.testing_utils import CaptureLogger

msg = "Testing 1, 2, 3"
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.bart.tokenization_bart")
with CaptureLogger(logger) as cl:
    logger.info(msg)
assert cl.out == msg + "\n"
```

### Testing with environment variables[[testing-with-environment-variables]]

If you want to test the impact of environment variables for a specific test, you can use the helper decorator `transformers.testing_utils.mockenv`:

```python
from transformers.testing_utils import mockenv


class HfArgumentParserTest(unittest.TestCase):
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
```

At times an external program needs to be called, which requires setting `PYTHONPATH` in `os.environ` to include multiple local paths. The helper class `transformers.testing_utils.TestCasePlus` comes to the rescue:

```python
from transformers.testing_utils import TestCasePlus


class EnvExampleTest(TestCasePlus):
    def test_external_prog(self):
        env = self.get_env()
        # now call the external program, passing `env` to it
```

Depending on whether the test file is under the `tests` test suite or `examples`, it will correctly set up `env[PYTHONPATH]` to include one of these two directories, and also the `src` directory to ensure the testing is done against the current repo. If `env[PYTHONPATH]` was already set before the test was called, it is used as is. This helper method creates a copy of the `os.environ` object, so the original remains intact.

### Getting reproducible results[[getting-reproducible-results]]

In some situations you may want to remove randomness from your tests to get identical, reproducible results. To do this, you need to fix the seed:

```python
seed = 42

# python RNG
import random

random.seed(seed)

# pytorch RNGs
import torch

torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)

# numpy RNG
import numpy as np

np.random.seed(seed)

# tf RNG
tf.random.set_seed(seed)
```
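Rather than repeating this block everywhere, `transformers` also exposes a convenience helper that sets the Python, NumPy and PyTorch (including CUDA) RNGs in one call; note this is a convenience sketch and the TensorFlow seed may still need to be set separately:

```python
from transformers import set_seed

set_seed(42)  # seeds python, numpy and torch in one go
```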
### Debugging tests[[debugging tests]]

To start a debugger at the point of the warning, do this:

```bash
pytest tests/utils/test_logging.py -W error::UserWarning --pdb
```

## Working with GitHub Actions workflows[[working-with-github-actions-workflows]]

To trigger a self-push workflow CI job, you must:

1. Create a new branch on the `transformers` origin (not a fork!).
2. The branch name has to start with either `ci_` or `ci-` (`main` triggers it too, but we can't do PRs on `main`). It also gets triggered only for specific paths - you can find the up-to-date definition, in case it changed since this document was written, [here](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) under *push:*.
3. Create a PR from this branch.
4. Then you can see the job appear [here](https://github.com/huggingface/transformers/actions/workflows/self-push.yml). It may not run right away if there is a backlog.

## Testing Experimental CI Features[[testing-Experimental-CI-Features]]

Testing CI features can be potentially problematic as it can interfere with normal CI functioning. Therefore, if a new CI feature is to be added, it should be done as follows:

1. Create a new dedicated job that tests what needs to be tested.
2. The new job must always succeed so that it gives us a green ✓ (details below).
3. Let it run for some days to see that a variety of different PR types get to run on it (user fork branches, non-forked branches, branches originating from the github.com UI direct file edit, various forced pushes, etc. - there are so many) while monitoring the experimental job's logs (not the overall job green, as it's purposefully always green).
4. When it's clear that everything is solid, merge the new changes into the existing jobs.

That way experiments on CI functionality itself won't interfere with the normal workflow.

Now, how can we make the job always succeed while the new CI feature is being developed?

Some CIs, like TravisCI, support `ignore-step-failure` and will report the overall job as successful, but CircleCI and GitHub Actions, which we currently use, don't support that.

So the following workaround can be used:

1. `set +euo pipefail` at the beginning of the run command to suppress most potential failures in the bash script.
2. The last command must be a success: `echo "done"` or just `true` will do.

Here is an example:

```yaml
- run:
    name: run CI experiment
    command: |
        set +euo pipefail
        echo "setting run-all-despite-any-errors-mode"
        this_command_will_fail
        echo "but bash continues to run"
        # emulate another failure
        false
        # but the last command must be a success
        echo "during experiment do not remove: reporting success to CI, even if there were failures"
```

For simple commands, you could also do:

```bash
cmd_that_may_fail || true
```

Of course, once satisfied with the results, integrate the experimental step or job with the rest of the normal jobs, while removing `set +euo pipefail` or any other things you may have added, to ensure that the experimental job doesn't interfere with normal CI functioning.

This whole process would have been much easier if we could only set something like `allow-failure` for the experimental step, and let it fail without impacting the overall status of PRs. But as mentioned earlier, CircleCI and GitHub Actions don't support it at the moment.
You can vote for this feature and see where it stands in these CI-specific threads:

- [Github Actions:](https://github.com/actions/toolkit/issues/399)
- [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
transformers/docs/source/ko/testing.md/0
{ "file_path": "transformers/docs/source/ko/testing.md", "repo_id": "transformers", "token_count": 35205 }
307
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Sharing custom models

The 🤗 Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.

If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you how to write a custom model and its configuration so it can be used with Transformers, and how you can share it with the community (with the code it relies on) so that anyone can use it, even if it's not present in the 🤗 Transformers library.

We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the [timm library](https://github.com/rwightman/pytorch-image-models) into a [`PreTrainedModel`].

## Writing a custom configuration

Before we dive into the model, let's first write its configuration. The configuration of a model is an object that holds all the information necessary to build the model. As we will see in the next section, the model can only take a `config` to be initialized, so we really need that object to be as complete as possible.

In our example, we will take a couple of arguments of the ResNet class that we might want to tweak. Different configurations will then give us the different types of ResNets that are possible. We then just store those arguments, after checking the validity of a few of them.

```python
from transformers import PretrainedConfig
from typing import List


class ResnetConfig(PretrainedConfig):
    model_type = "resnet"

    def __init__(
        self,
        block_type="bottleneck",
        layers: List[int] = [3, 4, 6, 3],
        num_classes: int = 1000,
        input_channels: int = 3,
        cardinality: int = 1,
        base_width: int = 64,
        stem_width: int = 64,
        stem_type: str = "",
        avg_down: bool = False,
        **kwargs,
    ):
        if block_type not in ["basic", "bottleneck"]:
            raise ValueError(f"`block_type` must be 'basic' or 'bottleneck', got {block_type}.")
        if stem_type not in ["", "deep", "deep-tiered"]:
            raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")

        self.block_type = block_type
        self.layers = layers
        self.num_classes = num_classes
        self.input_channels = input_channels
        self.cardinality = cardinality
        self.base_width = base_width
        self.stem_width = stem_width
        self.stem_type = stem_type
        self.avg_down = avg_down
        super().__init__(**kwargs)
```

The three important things to remember when writing your own configuration are the following:
- you have to inherit from `PretrainedConfig`,
- the `__init__` of your `PretrainedConfig` must accept any kwargs,
- those `kwargs` need to be passed to the superclass `__init__`.
The inheritance is to make sure you get all the functionality from the 🤗 Transformers library, while the other two constraints come from the fact that a `PretrainedConfig` has more fields than the ones you are setting. When reloading a config with the `from_pretrained` method, those fields need to be accepted by your config and then sent to the superclass.

Defining a `model_type` for your configuration (here `model_type="resnet"`) is not mandatory, unless you want to register your model with the auto classes (see the last section).

With this done, you can easily create and save your configuration like you would do with any other model config of the library. Here is how we can create a resnet50d config and save it:

```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d_config.save_pretrained("custom-resnet")
```

This will save a file named `config.json` inside the folder `custom-resnet`. You can then reload your config with the `from_pretrained` method:

```py
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
```

You can also use any other method of the [`PretrainedConfig`] class, like [`~PretrainedConfig.push_to_hub`], to directly upload your config to the Hub.
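As with any `PretrainedConfig`, individual attributes can also be overridden at load time by passing them as keyword arguments; a small sketch (the `num_classes` value here is arbitrary):

```py
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet", num_classes=10)
```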
## Writing a custom model

Now that we have our ResNet configuration, we can go on writing the model. We will actually write two: one that extracts the hidden features from a batch of images (like [`BertModel`]) and one that is suitable for image classification (like [`BertForSequenceClassification`]).

As we mentioned before, we'll only write a loose wrapper of the model to keep it simple for this example. The only thing we need to do before writing this class is a map between the block types and the actual block classes. Then the model is defined from the configuration by passing everything to the `ResNet` class:

```py
from transformers import PreTrainedModel
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
from .configuration_resnet import ResnetConfig


BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}


class ResnetModel(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor):
        return self.model.forward_features(tensor)
```

For the model that will classify images, we just change the forward method:

```py
import torch


class ResnetModelForImageClassification(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        block_layer = BLOCK_MAPPING[config.block_type]
        self.model = ResNet(
            block_layer,
            config.layers,
            num_classes=config.num_classes,
            in_chans=config.input_channels,
            cardinality=config.cardinality,
            base_width=config.base_width,
            stem_width=config.stem_width,
            stem_type=config.stem_type,
            avg_down=config.avg_down,
        )

    def forward(self, tensor, labels=None):
        logits = self.model(tensor)
        if labels is not None:
            loss = torch.nn.functional.cross_entropy(logits, labels)
            return {"loss": loss, "logits": logits}
        return {"logits": logits}
```

In both cases, notice how we inherit from `PreTrainedModel` and call the superclass initialization with the `config` (a bit like when you write a regular `torch.nn.Module`). The line that sets the `config_class` is not mandatory, unless you want to register your model with the auto classes (see the last section).

<Tip>

If your model is very similar to a model inside the library, you can re-use the same configuration as that model.

</Tip>

You can have your model return anything you want, but returning a dictionary like we did for `ResnetModelForImageClassification`, with the loss included when labels are passed, will make your model directly usable inside the [`Trainer`] class. Using another output format is fine as long as you are planning on using your own training loop or another library for training.

Now that we have our model class, let's create one:

```py
resnet50d = ResnetModelForImageClassification(resnet50d_config)
```

Again, you can use any of the methods of [`PreTrainedModel`], like [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`]. We will use the second in the next section, and see how to push the model weights along with the code of our model. But first, let's load some pretrained weights inside our model.

In your own use case, you will probably be training your custom model on your own data. To go fast for this tutorial, we will use the pretrained version of resnet50d. Since our model is just a wrapper around it, it's going to be easy to transfer those weights:

```py
import timm

pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```
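At this point it can be worth a quick sanity check that the wrapper behaves as expected. Here is a sketch of a forward pass on a dummy batch (the input shape and label are arbitrary):

```py
import torch

dummy_batch = torch.randn(1, 3, 224, 224)  # one fake 224x224 RGB image
with torch.no_grad():
    outputs = resnet50d(dummy_batch, labels=torch.tensor([1]))
print(outputs["loss"], outputs["logits"].shape)  # a scalar loss and logits of shape (1, 1000)
```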
Now let's see how to make sure that when we do [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`], the code of the model gets saved.

## Sending the code to the Hub

<Tip warning={true}>

This API is experimental and may have some slight breaking changes in the next releases.

</Tip>

First, make sure your model is fully defined in a `.py` file. It can rely on relative imports to some other files as long as all the files are in the same directory (we don't support submodules for this feature yet). For our example, we'll define a `modeling_resnet.py` file and a `configuration_resnet.py` file in a folder of the current working directory named `resnet_model`. The configuration file contains the code for `ResnetConfig` and the modeling file contains the code of `ResnetModel` and `ResnetModelForImageClassification`.

```
.
└── resnet_model
    ├── __init__.py
    ├── configuration_resnet.py
    └── modeling_resnet.py
```

The `__init__.py` can be empty; it's just there so that Python detects `resnet_model` can be used as a module.

<Tip warning={true}>

If copying modeling files from the library, you will need to replace all the relative imports at the top of the file to import from the `transformers` package.

</Tip>

Note that you can re-use (or subclass) an existing configuration/model.

To share your model with the community, follow these steps: first import the ResNet model and config from the newly created files:

```py
from resnet_model.configuration_resnet import ResnetConfig
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
```

Then you have to tell the library you want to copy the code files of those objects when using the `save_pretrained` method and properly register them with a given auto class (especially for models); just run:

```py
ResnetConfig.register_for_auto_class()
ResnetModel.register_for_auto_class("AutoModel")
ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
```

Note that there is no need to specify an auto class for the configuration (there is only one auto class for them, [`AutoConfig`]), but it's different for models. Your custom model could be suitable for many different tasks, so you have to specify which one of the auto classes is the correct one for your model.

Next, let's create the config and models as we did before:

```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d = ResnetModelForImageClassification(resnet50d_config)

pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```

Now to send the model to the Hub, make sure you are logged in. Either run in your terminal:

```bash
huggingface-cli login
```

or from a notebook:

```py
from huggingface_hub import notebook_login

notebook_login()
```

You can then push to your own namespace (or an organization you are a member of) like this:

```py
resnet50d.push_to_hub("custom-resnet50d")
```

On top of the model weights and the configuration in json format, this also copied the modeling and configuration `.py` files into the folder `custom-resnet50d` and uploaded the result to the Hub. You can check the result in this [model repo](https://huggingface.co/sgugger/custom-resnet50d).

See the [sharing tutorial](model_sharing) for more information on the push_to_hub method.
## Using a model with custom code

You can use any configuration, model or tokenizer with custom code files in its repository with the auto classes and the `from_pretrained` method. All files and code uploaded to the Hub are scanned for malware (refer to the [Hub security](https://huggingface.co/docs/hub/security#malware-scanning) documentation for more information), but you should still review the model code and author to avoid executing malicious code on your machine. Set `trust_remote_code=True` to use a model with custom code:

```py
from transformers import AutoModelForImageClassification

model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
```

It is also strongly encouraged to pass a commit hash as a `revision` to make sure the author of the models did not update the code with some malicious new lines (unless you fully trust the authors of the models).

```py
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
model = AutoModelForImageClassification.from_pretrained(
    "sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
)
```

Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit hash of any commit.

## Registering a model with custom code to the auto classes

If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own model. This is different from pushing the code to the Hub in the sense that users will need to import your library to get the custom models (contrary to automatically downloading the model code from the Hub).

As long as your config has a `model_type` attribute that is different from existing model types, and your model classes have the right `config_class` attributes, you can just add them to the auto classes like this:

```py
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification

AutoConfig.register("resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
```

Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type` of your custom config, and the first argument used when registering your custom models to any auto model class needs to match the `config_class` of those models.
transformers/docs/source/pt/custom_models.md/0
{ "file_path": "transformers/docs/source/pt/custom_models.md", "repo_id": "transformers", "token_count": 5917 }
308
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Utilities for `FeatureExtractors`

This page lists all the utility functions that can be used by the audio [`FeatureExtractor`] in order to compute special features from raw audio using common algorithms such as the *Short Time Fourier Transform* or the *log mel spectrogram*.

Most of those are only useful if you are studying the code of the audio processors in the library.

## Audio Transformations

[[autodoc]] audio_utils.hertz_to_mel

[[autodoc]] audio_utils.mel_to_hertz

[[autodoc]] audio_utils.mel_filter_bank

[[autodoc]] audio_utils.optimal_fft_length

[[autodoc]] audio_utils.window_function

[[autodoc]] audio_utils.spectrogram

[[autodoc]] audio_utils.power_to_db

[[autodoc]] audio_utils.amplitude_to_db
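For orientation, here is a rough sketch of how these utilities typically compose into a log-mel spectrogram. The function names come from the list above, but treat the exact keyword arguments as an assumption and check the generated reference documentation:

```python
import numpy as np

from transformers.audio_utils import mel_filter_bank, spectrogram, window_function

waveform = np.random.randn(16000).astype(np.float32)  # 1 second of fake 16 kHz audio

# assumption: these keyword arguments mirror the documented signatures
window = window_function(window_length=400, name="hann")
mel_filters = mel_filter_bank(
    num_frequency_bins=201,  # n_fft // 2 + 1, with n_fft == frame_length == 400
    num_mel_filters=80,
    min_frequency=0.0,
    max_frequency=8000.0,
    sampling_rate=16000,
)
log_mel = spectrogram(
    waveform,
    window,
    frame_length=400,
    hop_length=160,
    power=2.0,
    mel_filters=mel_filters,
    log_mel="log10",
)
```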
transformers/docs/source/zh/internal/audio_utils.md/0
{ "file_path": "transformers/docs/source/zh/internal/audio_utils.md", "repo_id": "transformers", "token_count": 515 }
309
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Custom hardware for training

The hardware you use to run model training and inference can have a big effect on performance. For a deep dive into GPUs, make sure to check out Tim Dettmers' excellent [blog post](https://timdettmers.com/2020/09/07/which-gpu-for-deep-learning/).

Let's have a look at some practical advice for GPU setups.

## GPU

When you train bigger models, you have essentially three options:

- bigger GPUs
- more GPUs
- more CPU and NVMe (via [DeepSpeed-Infinity](main_classes/deepspeed#nvme-support))

Let's start with the case where you have a single GPU.

### Power and Cooling

If you bought an expensive high-end GPU, make sure you give it the correct power and sufficient cooling.

**Power**:

Some high-end consumer GPU cards have 2 and sometimes 3 PCI-E 8-pin power sockets. Make sure you have as many independent 12V PCI-E 8-pin cables plugged into the card as there are sockets. Do not use the 2 splits at one end of the same cable (also known as a pigtail cable). That is, if you have 2 sockets on the GPU, you want 2 PCI-E 8-pin cables going from your PSU to the card, and not one that has 2 PCI-E 8-pin connectors at the end! You won't get the full performance out of your card otherwise.

Each PCI-E 8-pin power cable needs to be plugged into a 12V rail on the PSU side and can supply up to 150W of power.

Some other cards may use PCI-E 12-pin connectors, and these can deliver up to 500-600W of power.

Low-end cards may use 6-pin connectors, which supply up to 75W of power.

Additionally, you want a high-end PSU that has stable voltage. Some lower-quality ones may not give the card the stable voltage it needs to function at its peak.

And, of course, the PSU needs to have enough unused wattage to power the card.

**Cooling**:

When a GPU gets overheated it will start throttling down and will not deliver full performance. If the temperature gets too high, it may shorten the lifespan of the GPU.

It's hard to tell the exact best temperature to strive for when a GPU is heavily loaded, but probably anything under +80C is good, and lower is better - perhaps 70-75C is an excellent range to be in. Throttling is likely to start at around 84-90C. But other than throttling performance, a prolonged very high temperature is likely to reduce the lifespan of a GPU.

Next, let's have a look at one of the most important aspects when having multiple GPUs: connectivity.

### Multi-GPU Connectivity

If you use multiple GPUs, the way the cards are inter-connected can have a huge impact on the total training time. If the GPUs are on the same physical node, you can run:

```bash
nvidia-smi topo -m
```

and it will tell you how the GPUs are inter-connected. On a machine with dual-GPU connected with NVLink, you will most likely see something like:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      NV2     0-23            N/A
GPU1    NV2      X      0-23            N/A
```

on a different machine w/o NVLink, we may see:

```
        GPU0    GPU1    CPU Affinity    NUMA Affinity
GPU0     X      PHB     0-11            N/A
GPU1    PHB      X      0-11            N/A
```

The report includes this legend:

```
  X    = Self
  SYS  = Connection traversing PCIe as well as the SMP interconnect between NUMA nodes (e.g., QPI/UPI)
  NODE = Connection traversing PCIe as well as the interconnect between PCIe Host Bridges within a NUMA node
  PHB  = Connection traversing PCIe as well as a PCIe Host Bridge (typically the CPU)
  PXB  = Connection traversing multiple PCIe bridges (without traversing the PCIe Host Bridge)
  PIX  = Connection traversing at most a single PCIe bridge
  NV#  = Connection traversing a bonded set of # NVLinks
```

So the first report, `NV2`, tells us the GPUs are interconnected with 2 NVLinks, while in the second report, `PHB`, we have a typical consumer-level PCIe+Bridge setup.

Check what type of connectivity you have on your setup. Some of these will make the communication between cards faster (e.g. NVLink), others slower (e.g. PHB).

Depending on the type of scalability solution used, the connectivity speed can have a major or a minor impact. If the GPUs need to sync rarely, as in DDP, the impact of a slower connection will be less significant. If the GPUs need to send messages to each other often, as in ZeRO-DP, then faster connectivity becomes super important to achieve faster training.
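Besides `nvidia-smi`, you can also query whether direct GPU-to-GPU (peer-to-peer) access is available from PyTorch. A small sketch, assuming at least two visible CUDA devices:

```python
import torch

# True when device 0 can directly access device 1's memory (e.g., via NVLink or PCIe P2P)
print(torch.cuda.can_device_access_peer(0, 1))
```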
#### NVlink

[NVLink](https://en.wikipedia.org/wiki/NVLink) is a wire-based serial multi-lane near-range communications link developed by Nvidia.

Each new generation provides faster bandwidth; for example, here is a quote from the [Nvidia Ampere GA102 GPU Architecture](https://www.nvidia.com/content/dam/en-zz/Solutions/geforce/ampere/pdf/NVIDIA-ampere-GA102-GPU-Architecture-Whitepaper-V1.pdf) whitepaper:

> Third-Generation NVLink®
> GA102 GPUs utilize NVIDIA's third-generation NVLink interface, which includes four x4 links,
> with each link providing 14.0625 GB/sec bandwidth in each direction between two GPUs. Four
> links provide 56.25 GB/sec bandwidth in each direction, and 112.5 GB/sec total bandwidth
> between two GPUs. Two RTX 3090 GPUs can be connected together for SLI using NVLink.
> (Note that 3-Way and 4-Way SLI configurations are not supported.)

So the higher `X` you get in the `NVX` report in the output of `nvidia-smi topo -m`, the better. The generation will depend on your GPU architecture.

Let's compare the execution of a gpt2 language model training over a small sample of wikitext.

The results are:

| NVlink | Time |
| ------ | ---: |
| Y      | 101s |
| N      | 131s |

You can see that NVLink completes the training ~23% faster. In the second benchmark, we use `NCCL_P2P_DISABLE=1` to tell the GPUs not to use NVLink.

Here is the full benchmark code and outputs:

```bash
# DDP w/ NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 101.9003, 'train_samples_per_second': 1.963, 'epoch': 0.69}

# DDP w/o NVLink

rm -r /tmp/test-clm; CUDA_VISIBLE_DEVICES=0,1 NCCL_P2P_DISABLE=1 torchrun \
--nproc_per_node 2 examples/pytorch/language-modeling/run_clm.py --model_name_or_path openai-community/gpt2 \
--dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 --do_train \
--output_dir /tmp/test-clm --per_device_train_batch_size 4 --max_steps 200

{'train_runtime': 131.4367, 'train_samples_per_second': 1.522, 'epoch': 0.69}
```

Hardware: 2x TITAN RTX, 24GB each + NVlink with 2 NVLinks (`NV2` in `nvidia-smi topo -m`)

Software: `pytorch-1.8-to-be` + `cuda-11.0` / `transformers==4.3.0.dev0`
transformers/docs/source/zh/perf_hardware.md/0
{ "file_path": "transformers/docs/source/zh/perf_hardware.md", "repo_id": "transformers", "token_count": 3946 }
310
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Transformers Agents

<Tip warning={true}>

Transformers Agents is an experimental API that is subject to change at any time. Results returned by the agents can vary as the APIs or the underlying models are prone to change.

</Tip>

Transformers version `v4.29.0` builds on the concepts of *tools* and *agents*. You can play with it in [this colab](https://colab.research.google.com/drive/1c7MHD-T1forUPGcC_jlwsIptOzpG3hSj).

In short, it provides a natural language API on top of `Transformers`: we define a curated set of *tools* and design an *agent* to interpret natural language and to use those tools. It is extensible by design; we curated some relevant tools, but we'll show you how the system can easily be extended to use any tool developed by the community.

Let's start with a few examples of what can be achieved with this new API. It is particularly powerful when it comes to multimodal tasks, so let's take it for a spin to generate images and read text out loud.

```py
agent.run("Caption the following image", image=image)
```

| **Input**                                                                                                                    | **Output**                        |
|------------------------------------------------------------------------------------------------------------------------------|-----------------------------------|
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beaver.png" width=200> | A beaver is swimming in the water |

---

```py
agent.run("Read the following text out loud", text=text)
```

| **Input**                         | **Output**                                                                                                                                                                                                             |
|-----------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| A beaver is swimming in the water | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tts_example.wav" type="audio/wav"> your browser does not support the audio element. </audio>
|

---

```py
agent.run(
    "In the following `document`, where will the TRRF Scientific Advisory Council Meeting take place?",
    document=document,
)
```

| **Input**                                                                                                                                                   | **Output**     |
|-------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------|
| <img src="https://datasets-server.huggingface.co/assets/hf-internal-testing/example-documents/--/hf-internal-testing--example-documents/test/0/image/image.jpg" width=200> | ballroom foyer |

## Quickstart

Before being able to use `agent.run`, you will need to instantiate an agent, which is a large language model (LLM). We provide support for OpenAI models as well as open-source alternatives from BigCode and OpenAssistant. The OpenAI models perform better (but require you to have an OpenAI API key, so they cannot be used for free); Hugging Face provides free access to endpoints for the BigCode and OpenAssistant models.

To start with, please install the `agents` extras in order to install all the default dependencies.

```bash
pip install transformers[agents]
```

To use OpenAI models, you instantiate an `OpenAiAgent` after installing the `openai` dependency:

```bash
pip install openai
```

```py
from transformers import OpenAiAgent

agent = OpenAiAgent(model="text-davinci-003", api_key="<your_api_key>")
```

To use BigCode or OpenAssistant, start by logging in to have access to the Inference API:

```py
from huggingface_hub import login

login("<YOUR_TOKEN>")
```

Then, instantiate the agent:

```py
from transformers import HfAgent

# Starcoder
agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
# StarcoderBase
# agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoderbase")
# OpenAssistant
# agent = HfAgent(url_endpoint="https://api-inference.huggingface.co/models/OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5")
```

This example uses the inference API that Hugging Face provides for free at the moment. If you have your own inference endpoint for this model (or another one), you can replace the URL above with your URL endpoint.

<Tip>

StarCoder and OpenAssistant are free to use and perform admirably well on simple tasks. However, they no longer hold up when handling more complex prompts. If you're facing such an issue, we recommend trying out the OpenAI model which, while sadly not open-source, performs better at this given time.

</Tip>

You're now good to go! Let's dive into the two APIs that you now have at your disposal.

### Single execution (run)

The single execution method is using the agent's [`~Agent.run`] method:

```py
agent.run("Draw me a picture of rivers and lakes.")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200>

It automatically selects the tool (or tools) appropriate for the task you want to perform and runs them appropriately. It can perform one or several tasks in the same instruction (though the more complex your instruction, the more likely the agent is to fail).

```py
agent.run("Draw me a picture of the sea then transform the picture to add an island")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sea_and_island.png" width=200>

<br/>

Every [`~Agent.run`] operation is independent, so you can run it several times in a row with different tasks.

Note that your agent is just a large language model, so small variations in your prompt might yield completely different results. It's important to explain as clearly as possible the task you want to perform. We go more in-depth on how to write good prompts [here](../en/custom_tools#writing-good-user-inputs).

If you'd like to keep a state across executions or to pass non-text objects to the agent, you can do so by specifying variables that you would like the agent to use. For example, you could generate the first image of rivers and lakes, and ask the model to update that picture to add an island by doing the following:

```python
picture = agent.run("Generate a picture of rivers and lakes.")
updated_picture = agent.run("Transform the image in `picture` to add an island to it.", picture=picture)
```

<Tip>

This can be helpful when the model is unable to understand your request and mixes up tools. An example:

```py
agent.run("Draw me the picture of a capybara swimming in the sea")
```

In this case, the model could interpret your request in two ways:
- have `text-to-image` generate a capybara swimming in the sea
- or, have `text-to-image` generate a capybara, then use the `image-transformation` tool to have it swim in the sea

In case you'd like to force the first scenario, you can do so by passing the prompt to it as an argument:

```py
agent.run("Draw me a picture of the `prompt`", prompt="a capybara swimming in the sea")
```

</Tip>

### Chat-based execution (chat)

The chat-based execution method is using the [`~Agent.chat`] method:

```py
agent.chat("Generate a picture of rivers and lakes")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" width=200>

```py
agent.chat("Transform the picture so that there is a rock in there")
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes_and_beaver.png" width=200>

<br/>

This is an interesting approach when you want to keep state across instructions. It's better for experimentation, but will tend to be much better at single instructions rather than complex multi-step instructions (which the [`~Agent.run`] method is better at handling).

This method can also take arguments if you would like to pass non-text types or specific prompts.
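If the conversation drifts and you want the agent to forget the accumulated state, you can start over. A sketch, assuming the `prepare_for_new_chat` helper exposed by the agent:

```py
agent.prepare_for_new_chat()  # clears the chat history and any stored state
agent.chat("Generate a picture of rivers and lakes")
```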
### ⚠️ Remote execution

For demonstration purposes and so that it could be used with all setups, we created remote executors for several of the default tools at release time. These were created using inference endpoints.

We have turned these off for now, but in order to see how to set up remote executor tools yourself, we recommend reading the [custom tool guide](./custom_tools).

### What's happening here? What are tools, and what are agents?

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/diagram.png">

#### Agents

The "agent" here is a large language model, and we're prompting it so that it has access to a specific set of tools.

LLMs are pretty good at generating small samples of code, so this API takes advantage of that by prompting the LLM to produce a small code sample that uses a set of tools. This prompt is then completed by the task you give your agent and the description of the tools you give it. This way it gets access to the documentation of the tools, especially their expected inputs and outputs, and can generate the relevant code.

#### Tools

Tools are very simple: they're a single function, with a name and a description. We then use these tools' descriptions to prompt the agent. Through the prompt, we show the agent how it would leverage tools to perform what was requested in the query.

This is using brand-new tools and not pipelines, because the agent writes better code with very atomic tools. Pipelines are more refactored and often combine several tasks in one. Tools are meant to be focused on one very simple task only.

#### Code execution?

This code is then executed by our small Python interpreter on the set of inputs passed along with your tools. We hear you screaming "Arbitrary code execution!" in the back, but let us explain why that is not the case.

The only functions that can be called are the tools you provided and the print function, so you're already limited in what can be executed. You should be safe if it's limited to Hugging Face tools.

Then, we don't allow any attribute lookup or imports (which shouldn't be needed anyway for passing along inputs/outputs to a small set of functions), so all the most obvious attacks (and you'd need to prompt the LLM to output them anyway) shouldn't be an issue. If you want to be on the super safe side, you can execute the run() method with the additional argument return_code=True, in which case the agent will just return the code to execute and you can decide whether to run it or not.

The execution will stop at any line trying to perform an illegal operation, or if there is a regular Python error with the code generated by the agent.

### A curated set of tools

We identified a set of tools that can empower such agents. Here is an updated list of the tools we have integrated in `transformers`:

- **Document question answering**: given a document (such as a PDF) in image format, answer a question on this document ([Donut](../en/model_doc/donut))
- **Text question answering**: given a long text and a question, answer the question in the text ([Flan-T5](../en/model_doc/flan-t5))
- **Unconditional image captioning**: caption the image! ([BLIP](../en/model_doc/blip))
- **Image question answering**: given an image, answer a question on this image ([VILT](../en/model_doc/vilt))
- **Image segmentation**: given an image and a prompt, output the segmentation mask of that prompt ([CLIPSeg](../en/model_doc/clipseg))
- **Speech to text**: given an audio recording of a person talking, transcribe the speech into text ([Whisper](../en/model_doc/whisper))
- **Text to speech**: convert text to speech ([SpeechT5](../en/model_doc/speecht5))
- **Zero-shot text classification**: given a text and a list of labels, identify to which label the text corresponds the most ([BART](../en/model_doc/bart))
- **Text summarization**: summarize a long text in one or a few sentences ([BART](../en/model_doc/bart))
- **Translation**: translate the text into a given language ([NLLB](../en/model_doc/nllb))

These tools have an integration in transformers, and can be used manually as well, for example:

```py
from transformers import load_tool

tool = load_tool("text-to-speech")
audio = tool("This is a text to speech tool")
```

### Custom tools

While we identified a curated set of tools, we strongly believe that the main value provided by this implementation is the ability to quickly create and share custom tools.

By pushing the code of a tool to a Hugging Face Space or a model repository, you're then able to leverage the tool directly with the agent. We've added a few **transformers-agnostic** tools to the [`huggingface-tools` organization](https://huggingface.co/huggingface-tools):

- **Text downloader**: to download a text from a web URL
- **Text to image**: generate an image according to a prompt, leveraging stable diffusion
- **Image transformation**: modify an image given an initial image and a prompt, leveraging instruct pix2pix stable diffusion
- **Text to video**: generate a small video according to a prompt, leveraging damo-vilab

The text-to-image tool we have been using since the beginning is a remote tool that lives in [*huggingface-tools/text-to-image*](https://huggingface.co/spaces/huggingface-tools/text-to-image)! We will continue releasing such tools on this and other organizations, to further supercharge this implementation.

The agents have by default access to tools that reside on [`huggingface-tools`](https://huggingface.co/huggingface-tools). We explain how you can write and share your own custom tools, as well as leverage any custom tool that resides on the Hub, in a following guide.

### Code generation

So far we have shown how to use the agents to perform actions for you. However, the agent is only generating code that we then execute using a very restricted Python interpreter. In case you would like to use the generated code in a different setting, the agent can be prompted to return the code, along with the tool definitions and accurate imports.

For example, the following instruction

```python
agent.run("Draw me a picture of rivers and lakes", return_code=True)
```

returns the following code

```python
from transformers import load_tool

image_generator = load_tool("huggingface-tools/text-to-image")

image = image_generator(prompt="rivers and lakes")
```

that you can then modify and execute yourself.
transformers/docs/source/zh/transformers_agents.md/0
{ "file_path": "transformers/docs/source/zh/transformers_agents.md", "repo_id": "transformers", "token_count": 8118 }
311
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import os import sys from unittest.mock import patch from transformers.testing_utils import TestCasePlus, get_gpu_count, slow SRC_DIRS = [ os.path.join(os.path.dirname(__file__), dirname) for dirname in [ "text-classification", "language-modeling", "summarization", "token-classification", "question-answering", "speech-recognition", ] ] sys.path.extend(SRC_DIRS) if SRC_DIRS is not None: import run_clm_flax import run_flax_glue import run_flax_ner import run_flax_speech_recognition_seq2seq import run_mlm_flax import run_qa import run_summarization_flax import run_t5_mlm_flax logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() def get_setup_file(): parser = argparse.ArgumentParser() parser.add_argument("-f") args = parser.parse_args() return args.f def get_results(output_dir, split="eval"): path = os.path.join(output_dir, f"{split}_results.json") if os.path.exists(path): with open(path, "r") as f: return json.load(f) raise ValueError(f"can't find {path}") stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class ExamplesTests(TestCasePlus): def test_run_glue(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --eval_steps=2 --warmup_steps=2 --seed=42 --max_seq_length=128 """.split() with patch.object(sys, "argv", testargs): run_flax_glue.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) @slow def test_run_clm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_clm_flax.py --model_name_or_path distilbert/distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --block_size 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(sys, "argv", testargs): run_clm_flax.main() result = get_results(tmp_dir) self.assertLess(result["eval_perplexity"], 100) @slow def test_run_summarization(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_summarization.py --model_name_or_path google-t5/t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --test_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=8 --do_train --do_eval --do_predict --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(sys, "argv", testargs): 
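            # the example script parses its flags from sys.argv, so the patch above
            # drives a full end-to-end run using the arguments defined in testargs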
run_summarization_flax.main() result = get_results(tmp_dir, split="test") self.assertGreaterEqual(result["test_rouge1"], 10) self.assertGreaterEqual(result["test_rouge2"], 2) self.assertGreaterEqual(result["test_rougeL"], 7) self.assertGreaterEqual(result["test_rougeLsum"], 7) @slow def test_run_mlm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_mlm.py --model_name_or_path distilbert/distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --overwrite_output_dir --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --logging_steps 2 --eval_steps 2 --do_train --do_eval --num_train_epochs=1 """.split() with patch.object(sys, "argv", testargs): run_mlm_flax.main() result = get_results(tmp_dir) self.assertLess(result["eval_perplexity"], 42) @slow def test_run_t5_mlm(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_t5_mlm_flax.py --model_name_or_path google-t5/t5-small --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --do_train --do_eval --max_seq_length 128 --per_device_train_batch_size 4 --per_device_eval_batch_size 4 --num_train_epochs 2 --logging_steps 2 --eval_steps 2 --output_dir {tmp_dir} --overwrite_output_dir """.split() with patch.object(sys, "argv", testargs): run_t5_mlm_flax.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.42) @slow def test_run_ner(self): # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu epochs = 7 if get_gpu_count() > 1 else 2 tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_flax_ner.py --model_name_or_path google-bert/bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --overwrite_output_dir --do_train --do_eval --warmup_steps=2 --learning_rate=2e-4 --logging_steps 2 --eval_steps 2 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 """.split() with patch.object(sys, "argv", testargs): run_flax_ner.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_accuracy"], 0.75) self.assertGreaterEqual(result["eval_f1"], 0.3) @slow def test_run_qa(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_qa.py --model_name_or_path google-bert/bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=3 --warmup_steps=2 --do_train --do_eval --logging_steps 2 --eval_steps 2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 """.split() with patch.object(sys, "argv", testargs): run_qa.main() result = get_results(tmp_dir) self.assertGreaterEqual(result["eval_f1"], 30) self.assertGreaterEqual(result["eval_exact"], 30) @slow def test_run_flax_speech_recognition_seq2seq(self): tmp_dir = self.get_auto_remove_tmp_dir() testargs = f""" run_flax_speech_recognition_seq2seq.py --model_name_or_path openai/whisper-tiny.en --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config clean --train_split_name validation --eval_split_name validation --trust_remote_code --output_dir {tmp_dir} --overwrite_output_dir --num_train_epochs=2 --max_train_samples 10 --max_eval_samples 10 
--warmup_steps=8 --do_train --do_eval --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --predict_with_generate """.split() with patch.object(sys, "argv", testargs): run_flax_speech_recognition_seq2seq.main() result = get_results(tmp_dir, split="eval") self.assertLessEqual(result["eval_wer"], 0.05)
transformers/examples/flax/test_flax_examples.py/0
{ "file_path": "transformers/examples/flax/test_flax_examples.py", "repo_id": "transformers", "token_count": 4835 }
312
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Multiple choice fine-tuning: utilities to work with multiple choice tasks of reading comprehension""" import csv import glob import json import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional import tqdm from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available logger = logging.getLogger(__name__) @dataclass(frozen=True) class InputExample: """ A single training/test example for multiple choice Args: example_id: Unique id for the example. question: string. The untokenized text of the second sequence (question). contexts: list of str. The untokenized text of the first sequence (context of corresponding question). endings: list of str. multiple choice's options. Its length must be equal to contexts' length. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ example_id: str question: str contexts: List[str] endings: List[str] label: Optional[str] @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. """ example_id: str input_ids: List[List[int]] attention_mask: Optional[List[List[int]]] token_type_ids: Optional[List[List[int]]] label: Optional[int] class Split(Enum): train = "train" dev = "dev" test = "test" if is_torch_available(): import torch from torch.utils.data import Dataset class MultipleChoiceDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train, ): processor = processors[task]() cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}_{}".format( mode.value, tokenizer.__class__.__name__, str(max_seq_length), task, ), ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") label_list = processor.get_labels() if mode == Split.dev: examples = processor.get_dev_examples(data_dir) elif mode == Split.test: examples = processor.get_test_examples(data_dir) else: examples = processor.get_train_examples(data_dir) logger.info("Training examples: %s", len(examples)) self.features = convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, ) logger.info("Saving features into cached file %s", cached_features_file) torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] if is_tf_available(): import tensorflow as tf class TFMultipleChoiceDataset: """ This will be superseded by a framework-agnostic approach soon. """ features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, mode: Split = Split.train, ): processor = processors[task]() logger.info(f"Creating features from dataset file at {data_dir}") label_list = processor.get_labels() if mode == Split.dev: examples = processor.get_dev_examples(data_dir) elif mode == Split.test: examples = processor.get_test_examples(data_dir) else: examples = processor.get_train_examples(data_dir) logger.info("Training examples: %s", len(examples)) self.features = convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) self.dataset = tf.data.Dataset.from_generator( gen, ( { "example_id": tf.int32, "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, }, tf.int64, ), ( { "example_id": tf.TensorShape([]), "input_ids": tf.TensorShape([None, None]), "attention_mask": tf.TensorShape([None, None]), "token_type_ids": tf.TensorShape([None, None]), }, tf.TensorShape([]), ), ) def get_dataset(self): self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features))) return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] class DataProcessor: """Base class for data converters for multiple choice data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_test_examples(self, data_dir): """Gets a collection of `InputExample`s for the test set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() class RaceProcessor(DataProcessor): """Processor for the RACE data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) high = os.path.join(data_dir, "train/high") 
middle = os.path.join(data_dir, "train/middle") high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) high = os.path.join(data_dir, "dev/high") middle = os.path.join(data_dir, "dev/middle") high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} test".format(data_dir)) high = os.path.join(data_dir, "test/high") middle = os.path.join(data_dir, "test/middle") high = self._read_txt(high) middle = self._read_txt(middle) return self._create_examples(high + middle, "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3"] def _read_txt(self, input_dir): lines = [] files = glob.glob(input_dir + "/*txt") for file in tqdm.tqdm(files, desc="read files"): with open(file, "r", encoding="utf-8") as fin: data_raw = json.load(fin) data_raw["race_id"] = file lines.append(data_raw) return lines def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for _, data_raw in enumerate(lines): race_id = "%s-%s" % (set_type, data_raw["race_id"]) article = data_raw["article"] for i in range(len(data_raw["answers"])): truth = str(ord(data_raw["answers"][i]) - ord("A")) question = data_raw["questions"][i] options = data_raw["options"][i] examples.append( InputExample( example_id=race_id, question=question, contexts=[article, article, article, article], # this is not efficient but convenient endings=[options[0], options[1], options[2], options[3]], label=truth, ) ) return examples class SynonymProcessor(DataProcessor): """Processor for the Synonym data set.""" def get_train_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} train".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "mctrain.csv")), "train") def get_dev_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "mchp.csv")), "dev") def get_test_examples(self, data_dir): """See base class.""" logger.info("LOOKING AT {} dev".format(data_dir)) return self._create_examples(self._read_csv(os.path.join(data_dir, "mctest.csv")), "test") def get_labels(self): """See base class.""" return ["0", "1", "2", "3", "4"] def _read_csv(self, input_file): with open(input_file, "r", encoding="utf-8") as f: return list(csv.reader(f)) def _create_examples(self, lines: List[List[str]], type: str): """Creates examples for the training and dev sets.""" examples = [ InputExample( example_id=line[0], question="", # in the swag dataset, the # common beginning of each # choice is stored in "sent2". 
contexts=[line[1], line[1], line[1], line[1], line[1]],
                endings=[line[2], line[3], line[4], line[5], line[6]],
                label=line[7],
            )
            for line in lines
        ]

        return examples


class SwagProcessor(DataProcessor):
    """Processor for the SWAG data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, "train.csv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        return self._create_examples(self._read_csv(os.path.join(data_dir, "val.csv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} test".format(data_dir))
        raise ValueError(
            "For swag testing, the input file does not contain a label column. It cannot be tested in the current "
            "code setting!"
        )
        return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]

    def _read_csv(self, input_file):
        with open(input_file, "r", encoding="utf-8") as f:
            return list(csv.reader(f))

    def _create_examples(self, lines: List[List[str]], type: str):
        """Creates examples for the training and dev sets."""
        if type == "train" and lines[0][-1] != "label":
            raise ValueError("For training, the input file must contain a label column.")

        examples = [
            InputExample(
                example_id=line[2],
                question=line[5],  # in the swag dataset, the
                # common beginning of each
                # choice is stored in "sent2".
                contexts=[line[4], line[4], line[4], line[4]],
                endings=[line[7], line[8], line[9], line[10]],
                label=line[11],
            )
            for line in lines[1:]  # we skip the line with the column names
        ]

        return examples


class ArcProcessor(DataProcessor):
    """Processor for the ARC data set (request from allennlp)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} train".format(data_dir))
        return self._create_examples(self._read_json(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {} dev".format(data_dir))
        return self._create_examples(self._read_json(os.path.join(data_dir, "dev.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        logger.info("LOOKING AT {} test".format(data_dir))
        return self._create_examples(self._read_json(os.path.join(data_dir, "test.jsonl")), "test")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2", "3"]

    def _read_json(self, input_file):
        with open(input_file, "r", encoding="utf-8") as fin:
            lines = fin.readlines()
        return lines

    def _create_examples(self, lines, type):
        """Creates examples for the training and dev sets."""

        # Labels come in two formats (letters and digits); normalize both to 0-3.
        def normalize(truth):
            if truth in "ABCD":
                return ord(truth) - ord("A")
            elif truth in "1234":
                return int(truth) - 1
            else:
                logger.info("truth ERROR! %s", str(truth))
%s", str(truth)) return None examples = [] three_choice = 0 four_choice = 0 five_choice = 0 other_choices = 0 # we deleted example which has more than or less than four choices for line in tqdm.tqdm(lines, desc="read arc data"): data_raw = json.loads(line.strip("\n")) if len(data_raw["question"]["choices"]) == 3: three_choice += 1 continue elif len(data_raw["question"]["choices"]) == 5: five_choice += 1 continue elif len(data_raw["question"]["choices"]) != 4: other_choices += 1 continue four_choice += 1 truth = str(normalize(data_raw["answerKey"])) assert truth != "None" question_choices = data_raw["question"] question = question_choices["stem"] id = data_raw["id"] options = question_choices["choices"] if len(options) == 4: examples.append( InputExample( example_id=id, question=question, contexts=[ options[0]["para"].replace("_", ""), options[1]["para"].replace("_", ""), options[2]["para"].replace("_", ""), options[3]["para"].replace("_", ""), ], endings=[options[0]["text"], options[1]["text"], options[2]["text"], options[3]["text"]], label=truth, ) ) if type == "train": assert len(examples) > 1 assert examples[0].label is not None logger.info("len examples: %s}", str(len(examples))) logger.info("Three choices: %s", str(three_choice)) logger.info("Five choices: %s", str(five_choice)) logger.info("Other choices: %s", str(other_choices)) logger.info("four choices: %s", str(four_choice)) return examples def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) choices_inputs = [] for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): text_a = context if example.question.find("_") != -1: # this is for cloze question text_b = example.question.replace("_", ending) else: text_b = example.question + " " + ending inputs = tokenizer( text_a, text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, ) if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0: logger.info( "Attention! you are cropping tokens (swag task is ok). " "If you are training ARC and RACE and you are poping question + options, " "you need to try to use a bigger max seq length!" ) choices_inputs.append(inputs) label = label_map[example.label] input_ids = [x["input_ids"] for x in choices_inputs] attention_mask = ( [x["attention_mask"] for x in choices_inputs] if "attention_mask" in choices_inputs[0] else None ) token_type_ids = ( [x["token_type_ids"] for x in choices_inputs] if "token_type_ids" in choices_inputs[0] else None ) features.append( InputFeatures( example_id=example.example_id, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, ) ) for f in features[:2]: logger.info("*** Example ***") logger.info("feature: %s" % f) return features processors = {"race": RaceProcessor, "swag": SwagProcessor, "arc": ArcProcessor, "syn": SynonymProcessor} MULTIPLE_CHOICE_TASKS_NUM_LABELS = {"race", 4, "swag", 4, "arc", 4, "syn", 5}
transformers/examples/legacy/multiple_choice/utils_multiple_choice.py/0
{ "file_path": "transformers/examples/legacy/multiple_choice/utils_multiple_choice.py", "repo_id": "transformers", "token_count": 10031 }
313
#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Transformer XL model evaluation script.
Adapted from https://github.com/kimiyoung/transformer-xl.
In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py

This script with default values evaluates a pretrained Transformer-XL on WikiText 103.
"""

import argparse
import logging
import math
import time

import torch

from transformers import TransfoXLCorpus, TransfoXLLMHeadModel


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(description="PyTorch Transformer Language Model")
    parser.add_argument("--model_name", type=str, default="transfo-xl/transfo-xl-wt103", help="pretrained model name")
    parser.add_argument(
        "--split", type=str, default="test", choices=["all", "valid", "test"], help="which split to evaluate"
    )
    parser.add_argument("--batch_size", type=int, default=10, help="batch size")
    parser.add_argument("--tgt_len", type=int, default=128, help="number of tokens to predict")
    parser.add_argument("--ext_len", type=int, default=0, help="length of the extended context")
    parser.add_argument(
        "--mem_len", type=int, default=1600, help="length of the retained previous hidden states (the memory)"
    )
    parser.add_argument("--clamp_len", type=int, default=1000, help="max positional embedding index")
    parser.add_argument("--no_cuda", action="store_true", help="Do not use CUDA even though CUDA is available")
    parser.add_argument("--work_dir", type=str, required=True, help="path to the work_dir")
    parser.add_argument("--no_log", action="store_true", help="do not log the eval result")
    parser.add_argument("--same_length", action="store_true", help="set same length attention with masking")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    assert args.ext_len >= 0, "extended context length must be non-negative"

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    logger.info("device: {}".format(device))

    # Load a pre-processed dataset
    # You can also build the corpus yourself using TransfoXLCorpus methods
    # The pre-processing involves computing word frequencies to prepare the Adaptive input and SoftMax
    # and tokenizing the dataset
    # The pre-processed corpus is a conversion (using the conversion script)
    corpus = TransfoXLCorpus.from_pretrained(args.model_name)

    va_iter = corpus.get_iterator("valid", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)
    te_iter = corpus.get_iterator("test", args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len)

    # Load a pre-trained model
    model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
    model.to(device)

    logger.info(
        "Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}".format(
            args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len
        )
    )

    model.reset_memory_length(args.mem_len)
    if args.clamp_len > 0:
        model.clamp_len = args.clamp_len
    if args.same_length:
        model.same_length = True

    ###############################################################################
    # Evaluation code
    ###############################################################################
    def evaluate(eval_iter):
        # Turn on evaluation mode which disables dropout.
        model.eval()
        total_len, total_loss = 0, 0.0
        start_time = time.time()
        with torch.no_grad():
            mems = None
            for idx, (data, target, seq_len) in enumerate(eval_iter):
                ret = model(data, lm_labels=target, mems=mems)
                loss, _, mems = ret
                loss = loss.mean()
                total_loss += seq_len * loss.item()
                total_len += seq_len
            total_time = time.time() - start_time
        logger.info("Time : {:.2f}s, {:.2f}ms/segment".format(total_time, 1000 * total_time / (idx + 1)))
        return total_loss / total_len

    # Run on test data.
    if args.split == "all":
        test_loss = evaluate(te_iter)
        valid_loss = evaluate(va_iter)
    elif args.split == "valid":
        valid_loss = evaluate(va_iter)
        test_loss = None
    elif args.split == "test":
        test_loss = evaluate(te_iter)
        valid_loss = None

    def format_log(loss, split):
        log_str = "| {0} loss {1:5.2f} | {0} ppl {2:9.3f} ".format(split, loss, math.exp(loss))
        return log_str

    log_str = ""
    if valid_loss is not None:
        log_str += format_log(valid_loss, "valid")
    if test_loss is not None:
        log_str += format_log(test_loss, "test")

    logger.info("=" * 100)
    logger.info(log_str)
    logger.info("=" * 100)


if __name__ == "__main__":
    main()
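

# Example invocation (the work_dir path is illustrative):
#
#     python run_transfo_xl.py --work_dir ./tfxl-eval --split test
#
# With the defaults above, this downloads transfo-xl/transfo-xl-wt103 together with its
# pre-processed WikiText-103 corpus and reports loss and perplexity on the test split.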
transformers/examples/legacy/run_transfo_xl.py/0
{ "file_path": "transformers/examples/legacy/run_transfo_xl.py", "repo_id": "transformers", "token_count": 2273 }
314
import sys from transformers import AutoTokenizer dataset = sys.argv[1] model_name_or_path = sys.argv[2] max_len = int(sys.argv[3]) subword_len_counter = 0 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) max_len -= tokenizer.num_special_tokens_to_add() with open(dataset, "rt") as f_p: for line in f_p: line = line.rstrip() if not line: print(line) subword_len_counter = 0 continue token = line.split()[0] current_subwords_len = len(tokenizer.tokenize(token)) # Token contains strange control characters like \x96 or \x95 # Just filter out the complete line if current_subwords_len == 0: continue if (subword_len_counter + current_subwords_len) > max_len: print("") print(line) subword_len_counter = current_subwords_len continue subword_len_counter += current_subwords_len print(line)
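

# Example invocation (file and model names are illustrative). Given a CoNLL-style file
# with one "token label" pair per line and blank lines between sentences, this inserts
# extra sentence breaks so that no example exceeds the model's subword budget:
#
#     python3 scripts/preprocess.py dev.txt bert-base-multilingual-cased 128 > dev_split.txt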
transformers/examples/legacy/token-classification/scripts/preprocess.py/0
{ "file_path": "transformers/examples/legacy/token-classification/scripts/preprocess.py", "repo_id": "transformers", "token_count": 452 }
315
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Image pretraining examples This directory contains Python scripts that allow you to pre-train Transformer-based vision models (like [ViT](https://huggingface.co/docs/transformers/model_doc/vit), [Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)) on your own data, after which you can easily load the weights into a [`AutoModelForImageClassification`](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForImageClassification). It currently includes scripts for: - [SimMIM](#simmim) (by Microsoft Research) - [MAE](#mae) (by Facebook AI). NOTE: If you encounter problems/have suggestions for improvement, open an issue on Github and tag @NielsRogge. ## SimMIM The `run_mim.py` script can be used to pre-train any Transformer-based vision model in the library (concretely, any model supported by the `AutoModelForMaskedImageModeling` API) for masked image modeling as proposed in [SimMIM: A Simple Framework for Masked Image Modeling](https://arxiv.org/abs/2111.09886) using PyTorch. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/simmim_architecture.jpg" alt="drawing" width="300"/> <small> SimMIM framework. Taken from the <a href="https://arxiv.org/abs/2111.09886">original paper</a>. </small> The goal for the model is to predict raw pixel values for the masked patches, using just a linear layer as prediction head. The model is trained using a simple L1 loss. ### Using datasets from 🤗 datasets Here we show how to pre-train a `ViT` from scratch for masked image modeling on the [cifar10](https://huggingface.co/datasets/cifar10) dataset. Alternatively, one can decide to further pre-train an already pre-trained (or fine-tuned) checkpoint from the [hub](https://huggingface.co/). This can be done by setting the `model_name_or_path` argument to "google/vit-base-patch16-224-in21k" for example (and not specifying the `model_type` argument). ```bash !python run_mim.py \ --model_type vit \ --output_dir ./outputs/ \ --overwrite_output_dir \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ --do_eval \ --learning_rate 2e-5 \ --weight_decay 0.05 \ --num_train_epochs 100 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --logging_strategy steps \ --logging_steps 10 \ --eval_strategy epoch \ --save_strategy epoch \ --load_best_model_at_end True \ --save_total_limit 3 \ --seed 1337 ``` Here, we train for 100 epochs with a learning rate of 2e-5. Note that the SimMIM authors used a more sophisticated learning rate schedule, see the [config files](https://github.com/microsoft/SimMIM/blob/main/configs/vit_base__800ep/simmim_pretrain__vit_base__img224__800ep.yaml) for more info. 
One can easily tweak the script to include this learning rate schedule (several learning rate schedulers are supported via the [training arguments](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments)). We can also for instance replicate the pre-training of a Swin Transformer using the same architecture as used by the SimMIM authors. For this, we first create a custom configuration and save it locally: ```python from transformers import SwinConfig IMAGE_SIZE = 192 PATCH_SIZE = 4 EMBED_DIM = 128 DEPTHS = [2, 2, 18, 2] NUM_HEADS = [4, 8, 16, 32] WINDOW_SIZE = 6 config = SwinConfig( image_size=IMAGE_SIZE, patch_size=PATCH_SIZE, embed_dim=EMBED_DIM, depths=DEPTHS, num_heads=NUM_HEADS, window_size=WINDOW_SIZE, ) config.save_pretrained("path_to_config") ``` Next, we can run the script by providing the path to this custom configuration (replace `path_to_config` below with your path): ```bash !python run_mim.py \ --config_name_or_path path_to_config \ --model_type swin \ --output_dir ./outputs/ \ --overwrite_output_dir \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ --do_eval \ --learning_rate 2e-5 \ --num_train_epochs 5 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --logging_strategy steps \ --logging_steps 10 \ --eval_strategy epoch \ --save_strategy epoch \ --load_best_model_at_end True \ --save_total_limit 3 \ --seed 1337 ``` This will train a Swin Transformer from scratch. ### Using your own data To use your own dataset, the training script expects the following directory structure: ```bash root/dog/xxx.png root/dog/xxy.png root/dog/[...]/xxz.png root/cat/123.png root/cat/nsdf3.png root/cat/[...]/asd932_.png ``` Note that you can put images in dummy subfolders, whose names will be ignored by default (as labels aren't required). You can also just place all images into a single dummy subfolder. Once you've prepared your dataset, you can run the script like this: ```bash python run_mim.py \ --model_type vit \ --dataset_name nateraw/image-folder \ --train_dir <path-to-train-root> \ --output_dir ./outputs/ \ --remove_unused_columns False \ --label_names bool_masked_pos \ --do_train \ --do_eval ``` ## MAE The `run_mae.py` script can be used to pre-train a Vision Transformer as a masked autoencoder (MAE), as proposed in [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377). The script can be used to train a `ViTMAEForPreTraining` model in the Transformers library, using PyTorch. After self-supervised pre-training, one can load the weights of the encoder directly into a `ViTForImageClassification`. The MAE method allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. The goal for the model is to predict raw pixel values for the masked patches. As the model internally masks patches and learns to reconstruct them, there's no need for any labels. The model uses the mean squared error (MSE) between the reconstructed and original images in the pixel space. 
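For example, once pre-training has finished, the encoder weights can be loaded into a classification model. Below is a minimal sketch; `./vit-mae-demo` stands in for whichever `output_dir` you pre-trained with, and `num_labels` depends on your downstream task:

```python
from transformers import ViTForImageClassification

# Loading a `ViTMAEForPreTraining` checkpoint this way keeps the encoder weights,
# discards the decoder weights and initializes a fresh classification head, so a
# warning about newly initialized weights is expected.
model = ViTForImageClassification.from_pretrained("./vit-mae-demo", num_labels=10)
```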
### Using datasets from 🤗 `datasets` One can use the following command to pre-train a `ViTMAEForPreTraining` model from scratch on the [cifar10](https://huggingface.co/datasets/cifar10) dataset: ```bash python run_mae.py \ --dataset_name cifar10 \ --output_dir ./vit-mae-demo \ --remove_unused_columns False \ --label_names pixel_values \ --mask_ratio 0.75 \ --norm_pix_loss \ --do_train \ --do_eval \ --base_learning_rate 1.5e-4 \ --lr_scheduler_type cosine \ --weight_decay 0.05 \ --num_train_epochs 800 \ --warmup_ratio 0.05 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --logging_strategy steps \ --logging_steps 10 \ --eval_strategy epoch \ --save_strategy epoch \ --load_best_model_at_end True \ --save_total_limit 3 \ --seed 1337 ``` Here we set: - `mask_ratio` to 0.75 (to mask 75% of the patches for each image) - `norm_pix_loss` to use normalized pixel values as target (the authors reported better representations with this enabled) - `base_learning_rate` to 1.5e-4. Note that the effective learning rate is computed by the [linear schedule](https://arxiv.org/abs/1706.02677): `lr` = `blr` * total training batch size / 256. The total training batch size is computed as `training_args.train_batch_size` * `training_args.gradient_accumulation_steps` * `training_args.world_size`. This replicates the same hyperparameters as used in the original implementation, as shown in the table below. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/mae_pretraining_setting.png" alt="drawing" width="300"/> <small> Original hyperparameters. Taken from the <a href="https://arxiv.org/abs/2111.06377">original paper</a>. </small> Alternatively, one can decide to further pre-train an already pre-trained (or fine-tuned) checkpoint from the [hub](https://huggingface.co/). This can be done by setting the `model_name_or_path` argument to "facebook/vit-mae-base" for example. ### Using your own data To use your own dataset, the training script expects the following directory structure: ```bash root/dog/xxx.png root/dog/xxy.png root/dog/[...]/xxz.png root/cat/123.png root/cat/nsdf3.png root/cat/[...]/asd932_.png ``` Note that you can put images in dummy subfolders, whose names will be ignored by default (as labels aren't required). You can also just place all images into a single dummy subfolder. Once you've prepared your dataset, you can run the script like this: ```bash python run_mae.py \ --model_type vit_mae \ --dataset_name nateraw/image-folder \ --train_dir <path-to-train-root> \ --output_dir ./outputs/ \ --remove_unused_columns False \ --label_names pixel_values \ --do_train \ --do_eval ``` #### 💡 The above will split the train dir into training and evaluation sets - To control the split amount, use the `--train_val_split` flag. - To provide your own validation split in its own directory, you can pass the `--validation_dir <path-to-val-root>` flag. ## Sharing your model on 🤗 Hub 0. If you haven't already, [sign up](https://huggingface.co/join) for a 🤗 account 1. Make sure you have `git-lfs` installed and git set up. ```bash $ apt install git-lfs $ git config --global user.email "you@example.com" $ git config --global user.name "Your Name" ``` 2. Log in with your HuggingFace account credentials using `huggingface-cli` ```bash $ huggingface-cli login # ...follow the prompts ``` 3. When running the script, pass the following arguments: ```bash python run_xxx.py \ --push_to_hub \ --push_to_hub_model_id <name-of-your-model> \ ... ```
transformers/examples/pytorch/image-pretraining/README.md/0
{ "file_path": "transformers/examples/pytorch/image-pretraining/README.md", "repo_id": "transformers", "token_count": 3469 }
316
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning XLNet for question answering with beam search using 🤗 Accelerate. """ # You can also adapt this script on your own question answering task. Pointers for this are left as comments. import argparse import json import logging import math import os import random from pathlib import Path import datasets import evaluate import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from datasets import load_dataset from huggingface_hub import HfApi from torch.utils.data import DataLoader from tqdm.auto import tqdm from utils_qa import postprocess_qa_predictions_with_beam_search import transformers from transformers import ( AdamW, DataCollatorWithPadding, EvalPrediction, SchedulerType, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizerFast, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.45.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt") logger = get_logger(__name__) def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval"): """ Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix. """ # Prefix all keys with metric_key_prefix + '_' for key in list(results.keys()): if not key.startswith(f"{metric_key_prefix}_"): results[f"{metric_key_prefix}_{key}"] = results.pop(key) with open(os.path.join(output_dir, file_name), "w") as f: json.dump(results, f, indent=4) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." 
    )
    parser.add_argument(
        "--preprocessing_num_workers",
        type=int,
        default=1,
        help="The number of processes to use for the preprocessing.",
    )
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--test_file", type=str, default=None, help="A csv or a json file containing the prediction data."
    )
    parser.add_argument(
        "--max_seq_length",
        type=int,
        default=384,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
            " sequences shorter will be padded if `--pad_to_max_length` is passed."
        ),
    )
    parser.add_argument(
        "--pad_to_max_length",
        action="store_true",
        help="If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.",
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--per_device_train_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the training dataloader.",
    )
    parser.add_argument(
        "--per_device_eval_batch_size",
        type=int,
        default=8,
        help="Batch size (per device) for the evaluation dataloader.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=5e-5,
        help="Initial learning rate (after the potential warmup period) to use.",
    )
    parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
    parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
    parser.add_argument(
        "--max_train_steps",
        type=int,
        default=None,
        help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument(
        "--lr_scheduler_type",
        type=SchedulerType,
        default="linear",
        help="The scheduler type to use.",
        choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
    )
    parser.add_argument(
        "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
    )
    parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--doc_stride",
        type=int,
        default=128,
        help="When splitting up a long document into chunks how much stride to take between chunks.",
    )
    parser.add_argument(
        "--n_best_size",
        type=int,
        default=20,
        help="The total number of n-best predictions to generate when looking for an answer.",
    )
    parser.add_argument(
        "--null_score_diff_threshold",
        type=float,
        default=0.0,
        help=(
            "The threshold used to select the null answer: if the best answer has a score that is less than "
            "the score of the null answer minus this threshold, the null answer is selected for this example. "
            "Only useful when `version_2_with_negative=True`."
        ),
    )
    parser.add_argument(
        "--version_2_with_negative",
        action="store_true",
        help="If true, some of the examples do not have an answer.",
    )
    parser.add_argument(
        "--max_answer_length",
        type=int,
        default=30,
        help=(
            "The maximum length of an answer that can be generated. This is needed because the start "
            "and end predictions are not conditioned on one another."
        ),
    )
    parser.add_argument(
        "--max_train_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of training examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--max_eval_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
            "value if set."
        ),
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--max_predict_samples",
        type=int,
        default=None,
        help=(
            "For debugging purposes or quicker training, truncate the number of prediction examples to this "
            "value if set."
        ),
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
    parser.add_argument(
        "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
    )
    parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--report_to",
        type=str,
        default="all",
        help=(
            'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
            ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.'
        ),
    )
    args = parser.parse_args()

    # Sanity checks
    if (
        args.dataset_name is None
        and args.train_file is None
        and args.validation_file is None
        and args.test_file is None
    ):
        raise ValueError("Need either a dataset name or a training/validation/test file.")
    else:
        if args.train_file is not None:
            extension = args.train_file.split(".")[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if args.validation_file is not None:
            extension = args.validation_file.split(".")[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
        if args.test_file is not None:
            extension = args.test_file.split(".")[-1]
            assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."

    if args.push_to_hub:
        assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."

    return args


def main():
    args = parse_args()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_qa_beam_search_no_trainer", args)

    # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
    # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers
    # in the environment
    accelerator_log_kwargs = {}

    if args.with_tracking:
        accelerator_log_kwargs["log_with"] = args.report_to
        accelerator_log_kwargs["project_dir"] = args.output_dir

    accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state, main_process_only=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    # Handle the repository creation
    if accelerator.is_main_process:
        if args.push_to_hub:
            # Retrieve or infer repo_name
            repo_name = args.hub_model_id
            if repo_name is None:
                repo_name = Path(args.output_dir).absolute().name
            # Create repo and retrieve repo_id
            api = HfApi()
            repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id

            with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
                if "step_*" not in gitignore:
                    gitignore.write("step_*\n")
                if "epoch_*" not in gitignore:
                    gitignore.write("epoch_*\n")
        elif args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            args.dataset_name, args.dataset_config_name, trust_remote_code=args.trust_remote_code
        )
    else:
        data_files = {}
        if args.train_file is not None:
            data_files["train"] = args.train_file
            extension = args.train_file.split(".")[-1]
        if args.validation_file is not None:
            data_files["validation"] = args.validation_file
            extension = args.validation_file.split(".")[-1]
        if args.test_file is not None:
            data_files["test"] = args.test_file
            extension = args.test_file.split(".")[-1]
        raw_datasets = load_dataset(extension, data_files=data_files, field="data")
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = XLNetConfig.from_pretrained(args.model_name_or_path)
    tokenizer = XLNetTokenizerFast.from_pretrained(args.model_name_or_path)
    model = XLNetForQuestionAnswering.from_pretrained(
        args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config
    )

    # Preprocessing the datasets.
    # Preprocessing is slightly different for training and evaluation.
    column_names = raw_datasets["train"].column_names

    question_column_name = "question" if "question" in column_names else column_names[0]
    context_column_name = "context" if "context" in column_names else column_names[1]
    answer_column_name = "answers" if "answers" in column_names else column_names[2]

    # Padding side determines if we do (question|context) or (context|question).
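    # Note: XLNet's tokenizer pads on the left by default, so with the stock checkpoints
    # `pad_on_right` is typically False and the context is placed before the question.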
    pad_on_right = tokenizer.padding_side == "right"

    if args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )

    max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)

    # Training preprocessing
    def prepare_train_features(examples):
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
        # left whitespace
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_length",
        )

        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # The offset mappings will give us a map from token to character position in the original context. This will
        # help us compute the start_positions and end_positions.
        offset_mapping = tokenized_examples.pop("offset_mapping")
        # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers).
        special_tokens = tokenized_examples.pop("special_tokens_mask")

        # Let's label those examples!
        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []
        tokenized_examples["is_impossible"] = []
        tokenized_examples["cls_index"] = []
        tokenized_examples["p_mask"] = []

        for i, offsets in enumerate(offset_mapping):
            # We will label impossible answers with the index of the CLS token.
            input_ids = tokenized_examples["input_ids"][i]
            if tokenizer.cls_token_id in input_ids:
                cls_index = input_ids.index(tokenizer.cls_token_id)
            elif tokenizer.bos_token_id in input_ids:
                cls_index = input_ids.index(tokenizer.bos_token_id)
            else:
                cls_index = 0
            tokenized_examples["cls_index"].append(cls_index)

            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples["token_type_ids"][i]
            for k, s in enumerate(special_tokens[i]):
                if s:
                    sequence_ids[k] = 3
            context_idx = 1 if pad_on_right else 0

            # Build the p_mask: non special tokens and context get 0.0, the others get 1.0.
            # The cls token gets 0.0 too (it is the position used for predictions of empty answers).
            tokenized_examples["p_mask"].append(
                [
                    0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0
                    for k, s in enumerate(sequence_ids)
                ]
            )

            # One example can give several spans, this is the index of the example containing this span of text.
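            # Illustration (hypothetical numbers): if example 0 produced two overlapping
            # features and example 1 produced one, then sample_mapping == [0, 0, 1], so
            # features 0 and 1 both look up their answers in the first input example.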
            sample_index = sample_mapping[i]
            answers = examples[answer_column_name][sample_index]
            # If no answers are given, set the cls_index as answer.
            if len(answers["answer_start"]) == 0:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
                tokenized_examples["is_impossible"].append(1.0)
            else:
                # Start/end character index of the answer in the text.
                start_char = answers["answer_start"][0]
                end_char = start_char + len(answers["text"][0])

                # Start token index of the current span in the text.
                token_start_index = 0
                while sequence_ids[token_start_index] != context_idx:
                    token_start_index += 1

                # End token index of the current span in the text.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != context_idx:
                    token_end_index -= 1
                # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                    tokenized_examples["is_impossible"].append(1.0)
                else:
                    # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                    # Note: we could go after the last offset if the answer is the last word (edge case).
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(token_end_index + 1)
                    tokenized_examples["is_impossible"].append(0.0)

        return tokenized_examples

    if "train" not in raw_datasets:
        raise ValueError("--do_train requires a train dataset")
    train_dataset = raw_datasets["train"]
    if args.max_train_samples is not None:
        # We will select sample from whole data if argument is specified
        train_dataset = train_dataset.select(range(args.max_train_samples))
    # Create train feature from dataset
    with accelerator.main_process_first():
        train_dataset = train_dataset.map(
            prepare_train_features,
            batched=True,
            num_proc=args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not args.overwrite_cache,
            desc="Running tokenizer on train dataset",
        )
        if args.max_train_samples is not None:
            # Number of samples might increase during Feature Creation, We select only specified max samples
            train_dataset = train_dataset.select(range(args.max_train_samples))

    # Validation preprocessing
    def prepare_validation_features(examples):
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
        # left whitespace
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
        # in one example possibly giving several features when a context is long, each of those features having a
        # context that overlaps a bit with the context of the previous feature.
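        # Illustration (hypothetical numbers): with max_seq_length=384 and doc_stride=128,
        # an over-long context is split into overlapping windows, each new window re-using
        # the last 128 context tokens of the previous one.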
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            return_special_tokens_mask=True,
            return_token_type_ids=True,
            padding="max_length",
        )

        # Since one example might give us several features if it has a long context, we need a map from a feature to
        # its corresponding example. This key gives us just that.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

        # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers).
        special_tokens = tokenized_examples.pop("special_tokens_mask")

        # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
        # corresponding example_id and we will store the offset mappings.
        tokenized_examples["example_id"] = []

        # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label.
        tokenized_examples["cls_index"] = []
        tokenized_examples["p_mask"] = []

        for i, input_ids in enumerate(tokenized_examples["input_ids"]):
            # Find the CLS token in the input ids.
            if tokenizer.cls_token_id in input_ids:
                cls_index = input_ids.index(tokenizer.cls_token_id)
            elif tokenizer.bos_token_id in input_ids:
                cls_index = input_ids.index(tokenizer.bos_token_id)
            else:
                cls_index = 0
            tokenized_examples["cls_index"].append(cls_index)

            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples["token_type_ids"][i]
            for k, s in enumerate(special_tokens[i]):
                if s:
                    sequence_ids[k] = 3
            context_idx = 1 if pad_on_right else 0

            # Build the p_mask: non special tokens and context get 0.0, the others get 1.0.
            tokenized_examples["p_mask"].append(
                [
                    0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0
                    for k, s in enumerate(sequence_ids)
                ]
            )

            # One example can give several spans, this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            tokenized_examples["example_id"].append(examples["id"][sample_index])

            # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
            # position is part of the context or not.
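            # e.g. an offset of (17, 24) means the token covers characters 17-24 of the raw
            # context string; question and special token positions become None below.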
tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_idx else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if args.max_eval_samples is not None: # We will select sample from whole data eval_examples = eval_examples.select(range(args.max_eval_samples)) # Validation Feature Creation with accelerator.main_process_first(): eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again eval_dataset = eval_dataset.select(range(args.max_eval_samples)) if args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(args.max_predict_samples)) # Predict Feature Creation with accelerator.main_process_first(): predict_dataset = predict_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again predict_dataset = predict_dataset.select(range(args.max_predict_samples)) # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data collator that will just convert everything # to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of # the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple # of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=data_collator, batch_size=args.per_device_train_batch_size ) eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"]) eval_dataloader = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) if args.do_predict: predict_dataset_for_model = predict_dataset.remove_columns(["example_id", "offset_mapping"]) predict_dataloader = DataLoader( predict_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) # Post-processing: def post_processing_function(examples, features, predictions, stage="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. 
        predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search(
            examples=examples,
            features=features,
            predictions=predictions,
            version_2_with_negative=args.version_2_with_negative,
            n_best_size=args.n_best_size,
            max_answer_length=args.max_answer_length,
            start_n_top=model.config.start_n_top,
            end_n_top=model.config.end_n_top,
            output_dir=args.output_dir,
            prefix=stage,
        )
        # Format the result to the format the metric expects.
        if args.version_2_with_negative:
            formatted_predictions = [
                {"id": k, "prediction_text": v, "no_answer_probability": scores_diff_json[k]}
                for k, v in predictions.items()
            ]
        else:
            formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

        references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

    metric = evaluate.load("squad_v2" if args.version_2_with_negative else "squad")

    def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
        """
        Create and fill a numpy array of size len(dataset) * max_len

        Args:
            start_or_end_logits(:obj:`tensor`):
                This is the output predictions of the model. We can only enter either start or end logits.
            dataset: the dataset for which the logits were computed
            max_len(:obj:`int`):
                The maximum length of the output tensor. (See the model.eval() part for more details)
        """
        step = 0
        # create a numpy array and fill it with -100.
        logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)
        # Now that we have created the array, we populate it with the logits gathered using
        # accelerator.gather_for_metrics: each gathered tensor is copied into the rows that
        # belong to its batch, and `step` is advanced after every iteration.
        for i, output_logit in enumerate(start_or_end_logits):
            batch_size = output_logit.shape[0]
            cols = output_logit.shape[1]
            if step + batch_size < len(dataset):
                logits_concat[step : step + batch_size, :cols] = output_logit
            else:
                logits_concat[step:, :cols] = output_logit[: len(dataset) - step]

            step += batch_size

        return logits_concat

    # Optimizer
    # Split weights in two groups, one with weight decay and the other not.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)

    # Scheduler and math around the number of training steps.
    overrode_max_train_steps = False
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if args.max_train_steps is None:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
        overrode_max_train_steps = True

    lr_scheduler = get_scheduler(
        name=args.lr_scheduler_type,
        optimizer=optimizer,
        num_warmup_steps=args.num_warmup_steps * accelerator.num_processes,
        num_training_steps=args.max_train_steps
        if overrode_max_train_steps
        else args.max_train_steps * accelerator.num_processes,
    )

    # Prepare everything with our `accelerator`.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
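    # e.g. a dataloader of 1000 batches with gradient_accumulation_steps=4 yields
    # ceil(1000 / 4) = 250 optimizer updates per epoch.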
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # Figure out how often we should save the Accelerator states
    checkpointing_steps = args.checkpointing_steps
    if checkpointing_steps is not None and checkpointing_steps.isdigit():
        checkpointing_steps = int(checkpointing_steps)

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        experiment_config = vars(args)
        # TensorBoard cannot log Enums, need the raw value
        experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
        accelerator.init_trackers("qa_beam_search_no_trainer", experiment_config)

    # Train!
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")

    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            checkpoint_path = args.resume_from_checkpoint
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
            checkpoint_path = path
            path = os.path.basename(checkpoint_path)

        accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        accelerator.load_state(checkpoint_path)
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
            completed_steps = starting_epoch * num_update_steps_per_epoch
        else:
            # need to multiply `gradient_accumulation_steps` to reflect real steps
            resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
            starting_epoch = resume_step // len(train_dataloader)
            completed_steps = resume_step // args.gradient_accumulation_steps
            resume_step -= starting_epoch * len(train_dataloader)

    # update the progress_bar if load from checkpoint
    progress_bar.update(completed_steps)

    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We skip the first `n` batches in the dataloader when resuming from a checkpoint
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
        else:
            active_dataloader = train_dataloader
        for step, batch in enumerate(active_dataloader):
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                # We keep track of the loss at each epoch
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1

            if isinstance(checkpointing_steps, int):
                if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
                    accelerator.save_state(f"step_{completed_steps}")

            if completed_steps >= args.max_train_steps:
                break

        if args.push_to_hub and epoch < args.num_train_epochs - 1:
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(
                args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
            )
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                api.upload_folder(
                    commit_message=f"Training in progress epoch {epoch}",
                    folder_path=args.output_dir,
                    repo_id=repo_id,
                    repo_type="model",
                    token=args.hub_token,
                )

        # initialize all lists to collect the batches
        all_start_top_log_probs = []
        all_start_top_index = []
        all_end_top_log_probs = []
        all_end_top_index = []
        all_cls_logits = []

        model.eval()

        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                outputs = model(**batch)
                start_top_log_probs = outputs.start_top_log_probs
                start_top_index = outputs.start_top_index
                end_top_log_probs = outputs.end_top_log_probs
                end_top_index = outputs.end_top_index
                cls_logits = outputs.cls_logits

                if not args.pad_to_max_length:
                    # necessary to pad predictions and labels for being gathered
                    start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100)
                    start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100)
                    end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100)
                    end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
                    cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)

                all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
                all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
                all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
                all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
                all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

        max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor

        # concatenate all numpy arrays collected above
        start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, eval_dataset, max_len)
        start_top_index_concat = create_and_fill_np_array(all_start_top_index, eval_dataset, max_len)
        end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, eval_dataset, max_len)
        end_top_index_concat = create_and_fill_np_array(all_end_top_index, eval_dataset, max_len)
        cls_logits_concat = np.concatenate(all_cls_logits, axis=0)

        # delete the list of numpy arrays
        del start_top_log_probs
        del start_top_index
        del end_top_log_probs
        del end_top_index
        del cls_logits

        outputs_numpy = (
            start_top_log_probs_concat,
            start_top_index_concat,
            end_top_log_probs_concat,
            end_top_index_concat,
            cls_logits_concat,
        )

        prediction = post_processing_function(eval_examples, eval_dataset,
outputs_numpy) eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"Evaluation metrics: {eval_metric}") if args.do_predict: # initialize all lists to collect the batches all_start_top_log_probs = [] all_start_top_index = [] all_end_top_log_probs = [] all_end_top_index = [] all_cls_logits = [] model.eval() for step, batch in enumerate(predict_dataloader): with torch.no_grad(): outputs = model(**batch) start_top_log_probs = outputs.start_top_log_probs start_top_index = outputs.start_top_index end_top_log_probs = outputs.end_top_log_probs end_top_index = outputs.end_top_index cls_logits = outputs.cls_logits if not args.pad_to_max_length: # necessary to pad predictions and labels for being gathered start_top_log_probs = accelerator.pad_across_processes(start_top_log_probs, dim=1, pad_index=-100) start_top_index = accelerator.pad_across_processes(start_top_index, dim=1, pad_index=-100) end_top_log_probs = accelerator.pad_across_processes(end_top_log_probs, dim=1, pad_index=-100) end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100) cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100) all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy()) all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy()) all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy()) all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy()) all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy()) max_len = max([x.shape[1] for x in all_end_top_log_probs]) # Get the max_length of the tensor # concatenate all numpy arrays collected above start_top_log_probs_concat = create_and_fill_np_array(all_start_top_log_probs, predict_dataset, max_len) start_top_index_concat = create_and_fill_np_array(all_start_top_index, predict_dataset, max_len) end_top_log_probs_concat = create_and_fill_np_array(all_end_top_log_probs, predict_dataset, max_len) end_top_index_concat = create_and_fill_np_array(all_end_top_index, predict_dataset, max_len) cls_logits_concat = np.concatenate(all_cls_logits, axis=0) # delete the list of numpy arrays del start_top_log_probs del start_top_index del end_top_log_probs del end_top_index del cls_logits outputs_numpy = ( start_top_log_probs_concat, start_top_index_concat, end_top_log_probs_concat, end_top_index_concat, cls_logits_concat, ) prediction = post_processing_function(predict_examples, predict_dataset, outputs_numpy) predict_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(f"Predict metrics: {predict_metric}") if args.with_tracking: log = { "squad_v2" if args.version_2_with_negative else "squad": eval_metric, "train_loss": total_loss, "epoch": epoch, "step": completed_steps, } if args.do_predict: log["squad_v2_predict" if args.version_2_with_negative else "squad_predict"] = predict_metric accelerator.log(log) if args.checkpointing_steps == "epoch": accelerator.save_state(f"epoch_{epoch}") if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: tokenizer.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", 
folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) logger.info(json.dumps(eval_metric, indent=4)) save_prefixed_metrics(eval_metric, args.output_dir) if __name__ == "__main__": main()
transformers/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py", "repo_id": "transformers", "token_count": 20438 }
317
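As a side note, the padding-and-concatenation done by create_and_fill_np_array in the record above can be checked in isolation. A minimal sketch with toy shapes (not taken from the script's real tensors):

import numpy as np

# two "gathered" logit batches with different padded widths
batches = [np.ones((2, 3), dtype=np.float32), 2 * np.ones((1, 2), dtype=np.float32)]
dataset_len, max_len = 3, 3

logits_concat = np.full((dataset_len, max_len), -100, dtype=np.float32)
step = 0
for output_logit in batches:
    batch_size, cols = output_logit.shape
    if step + batch_size < dataset_len:
        logits_concat[step : step + batch_size, :cols] = output_logit
    else:
        # last batch may overshoot the dataset length; keep only the real rows
        logits_concat[step:, :cols] = output_logit[: dataset_len - step]
    step += batch_size

print(logits_concat)  # rows 0-1 from the first batch, row 2 from the second; missing columns stay -100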
#!/usr/bin/env python # coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)""" import argparse import inspect import logging from typing import Tuple import torch from accelerate import PartialState from accelerate.utils import set_seed from transformers import ( AutoTokenizer, BloomForCausalLM, BloomTokenizerFast, CTRLLMHeadModel, CTRLTokenizer, GenerationMixin, GPT2LMHeadModel, GPT2Tokenizer, GPTJForCausalLM, LlamaForCausalLM, LlamaTokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, OPTForCausalLM, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) from transformers.modeling_outputs import CausalLMOutputWithPast logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger = logging.getLogger(__name__) MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop MODEL_CLASSES = { "gpt2": (GPT2LMHeadModel, GPT2Tokenizer), "ctrl": (CTRLLMHeadModel, CTRLTokenizer), "openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer), "xlnet": (XLNetLMHeadModel, XLNetTokenizer), "transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer), "xlm": (XLMWithLMHeadModel, XLMTokenizer), "gptj": (GPTJForCausalLM, AutoTokenizer), "bloom": (BloomForCausalLM, BloomTokenizerFast), "llama": (LlamaForCausalLM, LlamaTokenizer), "opt": (OPTForCausalLM, GPT2Tokenizer), } # Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia # in https://github.com/rusiaaman/XLNet-gen#methodology # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos>""" # # Functions to prepare models' input # def prepare_ctrl_input(args, _, tokenizer, prompt_text): if args.temperature > 0.7: logger.info("CTRL typically works better with lower temperatures (and lower top_k).") encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False) if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()): logger.info("WARNING! 
You are not starting your generation from a control code so you won't get good results") return prompt_text def prepare_xlm_input(args, model, tokenizer, prompt_text): # kwargs = {"language": None, "mask_token_id": None} # Set the language use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb if hasattr(model.config, "lang2id") and use_lang_emb: available_languages = model.config.lang2id.keys() if args.xlm_language in available_languages: language = args.xlm_language else: language = None while language not in available_languages: language = input("Using XLM. Select language in " + str(list(available_languages)) + " >>> ") model.config.lang_id = model.config.lang2id[language] # kwargs["language"] = tokenizer.lang2id[language] # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers # XLM masked-language modeling (MLM) models need masked token # is_xlm_mlm = "mlm" in args.model_name_or_path # if is_xlm_mlm: # kwargs["mask_token_id"] = tokenizer.mask_token_id return prompt_text def prepare_xlnet_input(args, _, tokenizer, prompt_text): prefix = args.prefix if args.prefix else args.padding_text if args.padding_text else PREFIX prompt_text = prefix + prompt_text return prompt_text def prepare_transfoxl_input(args, _, tokenizer, prompt_text): prefix = args.prefix if args.prefix else args.padding_text if args.padding_text else PREFIX prompt_text = prefix + prompt_text return prompt_text PREPROCESSING_FUNCTIONS = { "ctrl": prepare_ctrl_input, "xlm": prepare_xlm_input, "xlnet": prepare_xlnet_input, "transfo-xl": prepare_transfoxl_input, } def adjust_length_to_model(length, max_sequence_length): if length < 0 and max_sequence_length > 0: length = max_sequence_length elif 0 < max_sequence_length < length: length = max_sequence_length # No generation bigger than model size elif length < 0: length = MAX_LENGTH # avoid infinite loop return length def sparse_model_config(model_config): embedding_size = None if hasattr(model_config, "hidden_size"): embedding_size = model_config.hidden_size elif hasattr(model_config, "n_embed"): embedding_size = model_config.n_embed elif hasattr(model_config, "n_embd"): embedding_size = model_config.n_embd num_head = None if hasattr(model_config, "num_attention_heads"): num_head = model_config.num_attention_heads elif hasattr(model_config, "n_head"): num_head = model_config.n_head if embedding_size is None or num_head is None or num_head == 0: raise ValueError("Check the model config") num_embedding_size_per_head = int(embedding_size / num_head) if hasattr(model_config, "n_layer"): num_layer = model_config.n_layer elif hasattr(model_config, "num_hidden_layers"): num_layer = model_config.num_hidden_layers else: raise ValueError("Number of hidden layers couldn't be determined from the model config") return num_layer, num_head, num_embedding_size_per_head def generate_past_key_values(model, batch_size, seq_len): num_block_layers, num_attention_heads, num_embedding_size_per_head = sparse_model_config(model.config) if model.config.model_type == "bloom": past_key_values = tuple( ( torch.empty(int(num_attention_heads * batch_size), num_embedding_size_per_head, seq_len) .to(model.dtype) .to(model.device), torch.empty(int(num_attention_heads * batch_size), seq_len, num_embedding_size_per_head) .to(model.dtype) .to(model.device), ) for _ in range(num_block_layers) ) else: past_key_values = tuple( ( torch.empty(batch_size, num_attention_heads, seq_len, num_embedding_size_per_head) .to(model.dtype) 
.to(model.device), torch.empty(batch_size, num_attention_heads, seq_len, num_embedding_size_per_head) .to(model.dtype) .to(model.device), ) for _ in range(num_block_layers) ) return past_key_values def prepare_jit_inputs(inputs, model, tokenizer): batch_size = len(inputs) dummy_input = tokenizer.batch_encode_plus(inputs, return_tensors="pt") dummy_input = dummy_input.to(model.device) if model.config.use_cache: dummy_input["past_key_values"] = generate_past_key_values(model, batch_size, 1) dummy_input["attention_mask"] = torch.cat( [ torch.zeros(dummy_input["attention_mask"].shape[0], 1) .to(dummy_input["attention_mask"].dtype) .to(model.device), dummy_input["attention_mask"], ], -1, ) return dummy_input class _ModelFallbackWrapper(GenerationMixin): __slots__ = ("_optimized", "_default") def __init__(self, optimized, default): self._optimized = optimized self._default = default def __call__(self, *args, **kwargs): if kwargs["past_key_values"] is None and self._default.config.use_cache: kwargs["past_key_values"] = generate_past_key_values(self._default, kwargs["input_ids"].shape[0], 0) kwargs.pop("position_ids", None) for k in list(kwargs.keys()): if kwargs[k] is None or isinstance(kwargs[k], bool): kwargs.pop(k) outputs = self._optimized(**kwargs) lm_logits = outputs[0] past_key_values = outputs[1] fixed_output = CausalLMOutputWithPast( loss=None, logits=lm_logits, past_key_values=past_key_values, hidden_states=None, attentions=None, ) return fixed_output def __getattr__(self, item): return getattr(self._default, item) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, inputs_embeds=None, use_cache=None, **kwargs ): return self._default.prepare_inputs_for_generation( input_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, **kwargs ) def _reorder_cache( self, past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor ) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step. 
""" return self._default._reorder_cache(past_key_values, beam_idx) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument("--prompt", type=str, default="") parser.add_argument("--length", type=int, default=20) parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped") parser.add_argument( "--temperature", type=float, default=1.0, help="temperature of 1.0 has no effect, lower tend toward greedy sampling", ) parser.add_argument( "--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2" ) parser.add_argument("--k", type=int, default=0) parser.add_argument("--p", type=float, default=0.9) parser.add_argument("--prefix", type=str, default="", help="Text added prior to input.") parser.add_argument("--padding_text", type=str, default="", help="Deprecated, the use of `--prefix` is preferred.") parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.") parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--use_cpu", action="store_true", help="Whether or not to use cpu. If set to False, " "we will use gpu/npu or mps device if available", ) parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument("--jit", action="store_true", help="Whether or not to use jit trace to accelerate inference") args = parser.parse_args() # Initialize the distributed state. distributed_state = PartialState(cpu=args.use_cpu) logger.warning(f"device: {distributed_state.device}, 16-bits inference: {args.fp16}") if args.seed is not None: set_seed(args.seed) # Initialize the model and tokenizer try: args.model_type = args.model_type.lower() model_class, tokenizer_class = MODEL_CLASSES[args.model_type] except KeyError: raise KeyError("the model {} you specified is not supported. 
You are welcome to add it and open a PR :)") tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token model = model_class.from_pretrained(args.model_name_or_path) # Set the model to the right device model.to(distributed_state.device) if args.fp16: model.half() max_seq_length = getattr(model.config, "max_position_embeddings", 0) args.length = adjust_length_to_model(args.length, max_sequence_length=max_seq_length) logger.info(args) prompt_text = args.prompt if args.prompt else input("Model prompt >>> ") # Different models need different input formatting and/or extra arguments requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys() if requires_preprocessing: prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type) preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text) if model.__class__.__name__ in ["TransfoXLLMHeadModel"]: tokenizer_kwargs = {"add_space_before_punct_symbol": True} else: tokenizer_kwargs = {} encoded_prompt = tokenizer.encode( preprocessed_prompt_text, add_special_tokens=False, return_tensors="pt", **tokenizer_kwargs ) else: prefix = args.prefix if args.prefix else args.padding_text encoded_prompt = tokenizer.encode(prefix + prompt_text, add_special_tokens=False, return_tensors="pt") encoded_prompt = encoded_prompt.to(distributed_state.device) if encoded_prompt.size()[-1] == 0: input_ids = None else: input_ids = encoded_prompt if args.jit: jit_input_texts = ["enable jit"] jit_inputs = prepare_jit_inputs(jit_input_texts, model, tokenizer) torch._C._jit_set_texpr_fuser_enabled(False) model.config.return_dict = False if hasattr(model, "forward"): sig = inspect.signature(model.forward) else: sig = inspect.signature(model.__call__) jit_inputs = tuple(jit_inputs[key] for key in sig.parameters if jit_inputs.get(key, None) is not None) traced_model = torch.jit.trace(model, jit_inputs, strict=False) traced_model = torch.jit.freeze(traced_model.eval()) traced_model(*jit_inputs) traced_model(*jit_inputs) model = _ModelFallbackWrapper(traced_model, model) output_sequences = model.generate( input_ids=input_ids, max_length=args.length + len(encoded_prompt[0]), temperature=args.temperature, top_k=args.k, top_p=args.p, repetition_penalty=args.repetition_penalty, do_sample=True, num_return_sequences=args.num_return_sequences, ) # Remove the batch dimension when returning multiple sequences if len(output_sequences.shape) > 2: output_sequences.squeeze_() generated_sequences = [] for generated_sequence_idx, generated_sequence in enumerate(output_sequences): print(f"=== GENERATED SEQUENCE {generated_sequence_idx + 1} ===") generated_sequence = generated_sequence.tolist() # Decode text text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True) # Remove all text after the stop token text = text[: text.find(args.stop_token) if args.stop_token else None] # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing total_sequence = ( prompt_text + text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True)) :] ) generated_sequences.append(total_sequence) print(total_sequence) return generated_sequences if __name__ == "__main__": main()
transformers/examples/pytorch/text-generation/run_generation.py/0
{ "file_path": "transformers/examples/pytorch/text-generation/run_generation.py", "repo_id": "transformers", "token_count": 6876 }
318
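For orientation, with its default sampling flags (--length 20, --temperature 1.0, --k 0, --p 0.9) the script above reduces to roughly this call for GPT-2. A minimal sketch; the prompt string is just an example:

import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

encoded_prompt = tokenizer.encode("The Manhattan Bridge", add_special_tokens=False, return_tensors="pt")
output_sequences = model.generate(
    input_ids=encoded_prompt,
    max_length=20 + len(encoded_prompt[0]),  # --length plus the prompt length
    temperature=1.0,
    top_k=0,
    top_p=0.9,
    repetition_penalty=1.0,
    do_sample=True,
    num_return_sequences=1,
)
print(tokenizer.decode(output_sequences[0], clean_up_tokenization_spaces=True))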
# coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
transformers/examples/research_projects/bertabs/test_utils_summarization.py/0
{ "file_path": "transformers/examples/research_projects/bertabs/test_utils_summarization.py", "repo_id": "transformers", "token_count": 1749 }
319
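utils_summarization itself is not part of this record; an implementation of truncate_or_pad consistent with the three block-size tests above would look like this (an assumption about the real helper, including the pad_token_id argument name):

def truncate_or_pad(sequence, block_size, pad_token_id):
    # truncate if too long, right-pad with pad_token_id if too short
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))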
import gzip
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from huggingface_hub.utils import insecure_hashlib
from minhash_deduplication import deduplicate_dataset

from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": insecure_hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculates mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculates the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking for keywords in the first few lines of the file."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1- looking for keywords in the first few lines of the file.
    2- counting the number of occurrences of the words 'config' and 'test' with respect to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter dataset with heuristics. Config, test and has_no_keywords files are removed with a given probability."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicated dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
transformers/examples/research_projects/codeparrot/scripts/preprocessing.py/0
{ "file_path": "transformers/examples/research_projects/codeparrot/scripts/preprocessing.py", "repo_id": "transformers", "token_count": 2776 }
320
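To see why get_hash above strips whitespace before hashing: reformatted copies of the same code collide to one hash, so the exact-dedup pass also catches whitespace-only variants. A standalone sketch (plain hashlib in place of huggingface_hub's insecure_hashlib):

import hashlib
import re

PATTERN = re.compile(r"\s+")

def content_hash(content):
    # hash the content with all whitespace removed
    return hashlib.md5(re.sub(PATTERN, "", content).encode("utf-8")).hexdigest()

a = "def add(x, y):\n    return x + y\n"
b = "def add(x, y): return x + y"
print(content_hash(a) == content_hash(b))  # True: only one copy survives deduplication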
#!/bin/bash
export CUDA_VISIBLE_DEVICES=0

PATH_TO_DATA=/h/xinji/projects/GLUE

MODEL_TYPE=bert  # bert or roberta
MODEL_SIZE=base  # base or large
DATASET=MRPC  # SST-2, MRPC, RTE, QNLI, QQP, or MNLI

MODEL_NAME=${MODEL_TYPE}-${MODEL_SIZE}
EPOCHS=10
if [ $MODEL_TYPE = 'bert' ]
then
  EPOCHS=3
  MODEL_NAME=${MODEL_NAME}-uncased
fi

python -u run_glue_deebert.py \
  --model_type $MODEL_TYPE \
  --model_name_or_path $MODEL_NAME \
  --task_name $DATASET \
  --do_train \
  --do_eval \
  --do_lower_case \
  --data_dir $PATH_TO_DATA/$DATASET \
  --max_seq_length 128 \
  --per_gpu_eval_batch_size=1 \
  --per_gpu_train_batch_size=8 \
  --learning_rate 2e-5 \
  --num_train_epochs $EPOCHS \
  --overwrite_output_dir \
  --seed 42 \
  --output_dir ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \
  --plot_data_dir ./results/ \
  --save_steps 0 \
  --overwrite_cache \
  --eval_after_first_stage
transformers/examples/research_projects/deebert/train_deebert.sh/0
{ "file_path": "transformers/examples/research_projects/deebert/train_deebert.sh", "repo_id": "transformers", "token_count": 417 }
321
{
  "vocab_size": 50265,
  "hidden_size": 768,
  "num_hidden_layers": 6,
  "num_attention_heads": 12,
  "intermediate_size": 3072,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "attention_probs_dropout_prob": 0.1,
  "max_position_embeddings": 514,
  "type_vocab_size": 1,
  "initializer_range": 0.02,
  "layer_norm_eps": 0.00001
}
transformers/examples/research_projects/distillation/training_configs/distilroberta-base.json/0
{ "file_path": "transformers/examples/research_projects/distillation/training_configs/distilroberta-base.json", "repo_id": "transformers", "token_count": 178 }
322
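One plausible way a student config like this is consumed is via a standard config class; the wiring below is an assumption for illustration, since the distillation training script is not part of this record:

import json
from transformers import RobertaConfig

# load the student architecture definition from the JSON above
with open("training_configs/distilroberta-base.json") as f:
    config = RobertaConfig(**json.load(f))
print(config.num_hidden_layers)  # 6, i.e. half of roberta-base's 12 layers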
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The Google Research Authors and The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for constructing PyTrees of PartitionSpecs."""

# utils adapted from https://github.com/google-research/google-research/blob/master/flax_models/t5x/partitions.py

import re

from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P


# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()


def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


# PartitionSpec for GPTNeo
# replicate the hidden dim and shard feed-forward and head dim
def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
transformers/examples/research_projects/jax-projects/model_parallel/partitions.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/model_parallel/partitions.py", "repo_id": "transformers", "token_count": 1130 }
323
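A standalone check of the window matching used by these rules: a rule tuple only needs to fully match some contiguous window of the flattened parameter path. The function is copied from the file above:

import re

def _match(qs, ks):
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False

param_path = ("transformer", "h", "3", "attention", "q_proj", "kernel")
print(_match(("attention", "(q_proj|k_proj|v_proj)", "kernel"), param_path))  # True -> sharded as P(None, "mp")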
import getopt
import json
import os

# import numpy as np
import sys
from collections import OrderedDict

import datasets
import numpy as np
import torch

from modeling_frcnn import GeneralizedRCNN
from processing_image import Preprocess
from utils import Config


"""
USAGE:
``python extracting_data.py -i <img_dir> -o <dataset_file>.datasets <batch_size>``
"""


TEST = False
CONFIG = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
DEFAULT_SCHEMA = datasets.Features(
    OrderedDict(
        {
            "attr_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
            "attr_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
            "boxes": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype="float32"),
            "img_id": datasets.Value("int32"),
            "obj_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
            "obj_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")),
            "roi_features": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype="float32"),
            "sizes": datasets.Sequence(length=2, feature=datasets.Value("float32")),
            "preds_per_image": datasets.Value(dtype="int32"),
        }
    )
)


class Extract:
    def __init__(self, argv=sys.argv[1:]):
        inputdir = None
        outputfile = None
        subset_list = None
        batch_size = 1
        opts, args = getopt.getopt(argv, "i:o:b:s", ["inputdir=", "outfile=", "batch_size=", "subset_list="])
        for opt, arg in opts:
            if opt in ("-i", "--inputdir"):
                inputdir = arg
            elif opt in ("-o", "--outfile"):
                outputfile = arg
            elif opt in ("-b", "--batch_size"):
                batch_size = int(arg)
            elif opt in ("-s", "--subset_list"):
                subset_list = arg

        assert inputdir is not None  # and os.path.isdir(inputdir), f"{inputdir}"
        assert outputfile is not None and not os.path.isfile(outputfile), f"{outputfile}"
        if subset_list is not None:
            with open(os.path.realpath(subset_list)) as f:
                self.subset_list = {self._vqa_file_split(x)[0] for x in tryload(f)}
        else:
            self.subset_list = None

        self.config = CONFIG
        if torch.cuda.is_available():
            self.config.model.device = "cuda"
        self.inputdir = os.path.realpath(inputdir)
        self.outputfile = os.path.realpath(outputfile)
        self.preprocess = Preprocess(self.config)
        self.model = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.config)
        self.batch = batch_size if batch_size != 0 else 1
        self.schema = DEFAULT_SCHEMA

    def _vqa_file_split(self, file):
        img_id = int(file.split(".")[0].split("_")[-1])
        filepath = os.path.join(self.inputdir, file)
        return (img_id, filepath)

    @property
    def file_generator(self):
        batch = []
        for i, file in enumerate(os.listdir(self.inputdir)):
            if self.subset_list is not None and i not in self.subset_list:
                continue
            batch.append(self._vqa_file_split(file))
            if len(batch) == self.batch:
                temp = batch
                batch = []
                yield list(map(list, zip(*temp)))

        # flush the last (possibly partial) batch
        for i in range(1):
            yield list(map(list, zip(*batch)))

    def __call__(self):
        # make writer
        if not TEST:
            writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile)
        # do file generator
        for i, (img_ids, filepaths) in enumerate(self.file_generator):
            images, sizes, scales_yx = self.preprocess(filepaths)
            output_dict = self.model(
                images,
                sizes,
                scales_yx=scales_yx,
                padding="max_detections",
                max_detections=self.config.MAX_DETECTIONS,
                pad_value=0,
                return_tensors="np",
                location="cpu",
            )
            output_dict["boxes"] = output_dict.pop("normalized_boxes")
            if not TEST:
                output_dict["img_id"] = np.array(img_ids)
                batch = self.schema.encode_batch(output_dict)
                writer.write_batch(batch)
            if TEST:
                break
        # finalize the writer
        if not TEST:
            num_examples, num_bytes = writer.finalize()
            print(f"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb")


def tryload(stream):
    try:
        data = json.load(stream)
        try:
            data = list(data.keys())
        except Exception:
            data = [d["img_id"] for d in data]
    except Exception:
        try:
            data = eval(stream.read())
        except Exception:
            data = stream.read().split("\n")
    return data


if __name__ == "__main__":
    extract = Extract(sys.argv[1:])
    extract()
    if not TEST:
        dataset = datasets.Dataset.from_file(extract.outputfile)
        # voila!
        # print(np.array(dataset[0:2]["roi_features"]).shape)
transformers/examples/research_projects/lxmert/extracting_data.py/0
{ "file_path": "transformers/examples/research_projects/lxmert/extracting_data.py", "repo_id": "transformers", "token_count": 2528 }
324
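For reference, the img_id parsing in _vqa_file_split above, applied to a typical COCO-style filename (the filename here is a hypothetical example):

file = "COCO_val2014_000000262148.jpg"
img_id = int(file.split(".")[0].split("_")[-1])  # strip extension, take the numeric suffix
print(img_id)  # 262148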
# Copyright 2020-present, the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Count remaining (non-zero) weights in the encoder (i.e. the transformer layers).
Sparsity and remaining weights levels are equivalent: sparsity % = 100 - remaining weights %.
"""

import argparse
import os

import torch

from emmental.modules import ThresholdBinarizer, TopKBinarizer


def main(args):
    serialization_dir = args.serialization_dir
    pruning_method = args.pruning_method
    threshold = args.threshold

    st = torch.load(os.path.join(serialization_dir, "pytorch_model.bin"), map_location="cpu")

    remaining_count = 0  # Number of remaining (not pruned) params in the encoder
    encoder_count = 0  # Number of params in the encoder

    print("name".ljust(60, " "), "Remaining Weights %", "Remaining Weight")
    for name, param in st.items():
        if "encoder" not in name:
            continue

        if "mask_scores" in name:
            if pruning_method == "topK":
                mask_ones = TopKBinarizer.apply(param, threshold).sum().item()
            elif pruning_method == "sigmoied_threshold":
                mask_ones = ThresholdBinarizer.apply(param, threshold, True).sum().item()
            elif pruning_method == "l0":
                l, r = -0.1, 1.1
                s = torch.sigmoid(param)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                mask_ones = (mask > 0.0).sum().item()
            else:
                raise ValueError("Unknown pruning method")
            remaining_count += mask_ones
            print(name.ljust(60, " "), str(round(100 * mask_ones / param.numel(), 3)).ljust(20, " "), str(mask_ones))
        else:
            encoder_count += param.numel()
            if "bias" in name or "LayerNorm" in name:
                remaining_count += param.numel()

    print("")
    print("Remaining Weights (global) %: ", 100 * remaining_count / encoder_count)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, topK = Movement pruning, sigmoied_threshold = Soft movement"
            " pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `topK`, it is the level of remaining weights (in %) in the fine-pruned model. "
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. "
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--serialization_dir",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )

    args = parser.parse_args()

    main(args)
transformers/examples/research_projects/movement-pruning/counts_parameters.py/0
{ "file_path": "transformers/examples/research_projects/movement-pruning/counts_parameters.py", "repo_id": "transformers", "token_count": 1429 }
325
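To make the topK accounting above concrete, here is keep-top-k-by-score mask logic in plain torch (the script itself delegates to emmental's TopKBinarizer; treating this as its mask semantics is an assumption):

import torch

scores = torch.randn(4, 4)  # learned importance scores for one weight matrix
threshold = 0.25            # keep the top 25% of weights by score
k = int(threshold * scores.numel())
mask = torch.zeros_like(scores)
mask.view(-1)[scores.view(-1).topk(k).indices] = 1.0  # ones mark surviving weights
print(f"remaining weights: {100 * mask.sum().item() / mask.numel():.1f}%")  # 25.0%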
# coding=utf-8 # Copyright 2021 NVIDIA Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for training models with pytorch-quantization""" import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor logger = logging.getLogger(__name__) name_width = 50 # max width of layer names qname_width = 70 # max width of quantizer names # ========================================== Quant Trainer API ========================================== def add_arguments(parser): """Add arguments to parser for functions defined in quant_trainer.""" group = parser.add_argument_group("quant_trainer arguments") group.add_argument("--wprec", type=int, default=8, help="weight precision") group.add_argument("--aprec", type=int, default=8, help="activation precision") group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling") group.add_argument("--quant-disable", action="store_true", help="disable all quantizers") group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers") group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword") group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.") group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer") group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use") group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator") group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv") group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N") group.add_argument( "--recalibrate-weights", action="store_true", help=( "recalibrate weight amaxes by taking the max of the weights." " amaxes will be computed with the current quantization granularity (axis)." 
), ) def set_default_quantizers(args): """Set default quantizers before creating the model.""" if args.calibrator == "max": calib_method = "max" elif args.calibrator == "percentile": if args.percentile is None: raise ValueError("Specify --percentile when using percentile calibrator") calib_method = "histogram" elif args.calibrator == "mse": calib_method = "histogram" else: raise ValueError(f"Invalid calibrator {args.calibrator}") input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method) weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,))) quant_nn.QuantLinear.set_default_quant_desc_input(input_desc) quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc) def configure_model(model, args, calib=False, eval=False): """Function called before the training loop.""" logger.info("Configuring Model for Quantization") logger.info(f"using quantization package {pytorch_quantization.__file__}") if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True) if args.quant_disable: set_quantizer_by_name(model, [""], _disabled=True) if args.quant_disable_keyword: set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True) if args.quant_disable_layer_module: set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True) if args.quant_enable_layer_module: set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False) if args.recalibrate_weights: recalibrate_weights(model) if args.fuse_qkv: fuse_qkv(model, args) if args.clip_gelu: clip_gelu(model, args.clip_gelu) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(model) def enable_calibration(model): """Enable calibration of all *_input_quantizer modules in model.""" logger.info("Enabling Calibration") for name, module in model.named_modules(): if name.endswith("_quantizer"): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(f"{name:80}: {module}") def finish_calibration(model, args): """Disable calibration and load amax for all "*_input_quantizer modules in model.""" logger.info("Loading calibrated amax") for name, module in model.named_modules(): if name.endswith("_quantizer"): if module._calibrator is not None: if isinstance(module._calibrator, calib.MaxCalibrator): module.load_calib_amax() else: module.load_calib_amax("percentile", percentile=args.percentile) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(model) # ========================================== Helper Function ========================================== def fuse_qkv(model, args): """Adjust quantization ranges to match an implementation where the QKV projections are implemented with a single GEMM. Force the weight and output scale factors to match by taking the max of (Q,K,V). 
""" def fuse3(qq, qk, qv): for mod in [qq, qk, qv]: if not hasattr(mod, "_amax"): print(" WARNING: NO AMAX BUFFER") return q = qq._amax.detach().item() k = qk._amax.detach().item() v = qv._amax.detach().item() amax = max(q, k, v) qq._amax.fill_(amax) qk._amax.fill_(amax) qv._amax.fill_(amax) logger.info(f" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}") for name, mod in model.named_modules(): if name.endswith(".attention.self"): logger.info(f"FUSE_QKV: {name:{name_width}}") fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer) if args.quant_per_tensor: fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer) def clip_gelu(model, maxval): """Clip activations generated by GELU to maxval when quantized. Implemented by adjusting the amax of the following input_quantizer. """ for name, mod in model.named_modules(): if name.endswith(".output.dense") and not name.endswith("attention.output.dense"): amax_init = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=maxval) amax = mod._input_quantizer._amax.data.detach().item() logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}") def expand_amax(model): """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax.""" for name, mod in model.named_modules(): if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None: k = mod.weight.shape[0] amax = mod._weight_quantizer._amax.detach() mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}") def recalibrate_weights(model): """Performs max calibration on the weights and updates amax.""" for name, mod in model.named_modules(): if hasattr(mod, "_weight_quantizer"): if not hasattr(mod.weight_quantizer, "_amax"): print("RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER") continue # determine which axes to reduce across # e.g. 
a 4D tensor quantized per axis 0 should reduce over (1,2,3) axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis) reduce_axis = set(range(len(mod.weight.size()))) - axis_set amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach() logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}") mod._weight_quantizer._amax = amax def print_model_summary(model, name_width=25, line_width=180, ignore=None): """Print model quantization configuration.""" if ignore is None: ignore = [] elif not isinstance(ignore, list): ignore = [ignore] name_width = 0 for name, mod in model.named_modules(): if not hasattr(mod, "weight"): continue name_width = max(name_width, len(name)) for name, mod in model.named_modules(): input_q = getattr(mod, "_input_quantizer", None) weight_q = getattr(mod, "_weight_quantizer", None) if not hasattr(mod, "weight"): continue if type(mod) in ignore: continue if [True for s in ignore if isinstance(s, str) and s in name]: continue act_str = f"Act:{input_q.extra_repr()}" wgt_str = f"Wgt:{weight_q.extra_repr()}" s = f"{name:{name_width}} {act_str} {wgt_str}" if len(s) <= line_width: logger.info(s) else: logger.info(f"{name:{name_width}} {act_str}") logger.info(f'{" ":{name_width}} {wgt_str}') def print_quant_summary(model): """Print summary of all quantizer modules in the model.""" count = 0 for name, mod in model.named_modules(): if isinstance(mod, pytorch_quantization.nn.TensorQuantizer): print(f"{name:80} {mod}") count += 1 print(f"{count} TensorQuantizers found in model") def set_quantizer(name, mod, quantizer, k, v): """Set attributes for mod.quantizer.""" quantizer_mod = getattr(mod, quantizer, None) if quantizer_mod is not None: assert hasattr(quantizer_mod, k) setattr(quantizer_mod, k, v) else: logger.warning(f"{name} has no {quantizer}") def set_quantizers(name, mod, which="both", **kwargs): """Set quantizer attributes for mod.""" s = f"Warning: changing {which} quantizers of {name:{qname_width}}" for k, v in kwargs.items(): s += f" {k}={v}" if which in ["input", "both"]: set_quantizer(name, mod, "_input_quantizer", k, v) if which in ["weight", "both"]: set_quantizer(name, mod, "_weight_quantizer", k, v) logger.info(s) def set_quantizer_by_name(model, names, **kwargs): """Set quantizer attributes for layers where name contains a substring in names.""" for name, mod in model.named_modules(): if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"): for n in names: if re.search(n, name): set_quantizers(name, mod, **kwargs) elif name.endswith("_quantizer"): for n in names: if re.search(n, name): s = f"Warning: changing {name:{name_width}}" for k, v in kwargs.items(): s += f" {k}={v}" setattr(mod, k, v) logger.info(s)
transformers/examples/research_projects/quantization-qdqbert/quant_trainer.py/0
{ "file_path": "transformers/examples/research_projects/quantization-qdqbert/quant_trainer.py", "repo_id": "transformers", "token_count": 5092 }
326
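For intuition, the amax values that finish_calibration loads above are per-tensor absolute maxima collected over calibration batches. The idea reduced to plain torch (pytorch_quantization computes this inside each TensorQuantizer; the helper below is illustrative, not its API):

import torch

def max_calibrate(batches):
    # track the largest absolute activation value seen across all batches
    amax = torch.tensor(0.0)
    for x in batches:
        amax = torch.maximum(amax, x.abs().max())
    return amax

activations = [torch.randn(8, 768) for _ in range(4)]
amax = max_calibrate(activations)
scale = 127.0 / amax  # int8 quantization scale derived from the observed range
print(amax.item(), scale.item())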
"""Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py""" import argparse import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Any, Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch import torch.distributed as dist import torch.distributed as torch_distrib from pytorch_lightning.plugins.training_type import DDPPlugin from torch.utils.data import DataLoader from transformers import ( AutoConfig, AutoTokenizer, BartForConditionalGeneration, BatchEncoding, RagConfig, RagSequenceForGeneration, RagTokenForGeneration, RagTokenizer, T5ForConditionalGeneration, ) from transformers import logging as transformers_logging from transformers.integrations import is_ray_available if is_ray_available(): import ray from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever from callbacks_rag import ( # noqa: E402 # isort:skipq get_checkpoint_callback, get_early_stopping_callback, Seq2SeqLoggingCallback, ) from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip from utils_rag import ( # noqa: E402 # isort:skip calculate_exact_match, flatten_list, get_git_info, is_rag_model, lmap, pickle_save, save_git_info, save_json, set_extra_model_params, Seq2SeqDataset, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) transformers_logging.set_verbosity_info() class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self class CustomDDP(DDPPlugin): def init_ddp_connection(self, global_rank=None, world_size=None) -> None: module = self.model global_rank = global_rank if global_rank is not None else self.cluster_environment.global_rank() world_size = world_size if world_size is not None else self.cluster_environment.world_size() os.environ["MASTER_ADDR"] = self.cluster_environment.master_address() os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port()) if not torch.distributed.is_initialized(): logger.info(f"initializing ddp: GLOBAL_RANK: {global_rank}, MEMBER: {global_rank + 1}/{world_size}") torch_distrib.init_process_group(self.torch_distributed_backend, rank=global_rank, world_size=world_size) if module.is_rag_model: self.distributed_port = module.hparams.distributed_port if module.distributed_retriever == "pytorch": module.model.rag.retriever.init_retrieval(self.distributed_port) elif module.distributed_retriever == "ray" and global_rank == 0: # For the Ray retriever, only initialize it once when global # rank is 0. 
module.model.rag.retriever.init_retrieval() class GenerativeQAModule(BaseTransformer): mode = "generative_qa" loss_names = ["loss"] metric_names = ["em"] val_metric = "em" def __init__(self, hparams, **kwargs): # when loading from a pytorch lightning checkpoint, hparams are passed as dict if isinstance(hparams, dict): hparams = AttrDict(hparams) if hparams.model_type == "rag_sequence": self.model_class = RagSequenceForGeneration elif hparams.model_type == "rag_token": self.model_class = RagTokenForGeneration elif hparams.model_type == "bart": self.model_class = BartForConditionalGeneration else: self.model_class = T5ForConditionalGeneration self.is_rag_model = is_rag_model(hparams.model_type) config_class = RagConfig if self.is_rag_model else AutoConfig config = config_class.from_pretrained(hparams.model_name_or_path) # set retriever parameters config.index_name = hparams.index_name or config.index_name config.passages_path = hparams.passages_path or config.passages_path config.index_path = hparams.index_path or config.index_path config.use_dummy_dataset = hparams.use_dummy_dataset # set extra_model_params for generator configs and load_model extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout") if self.is_rag_model: if hparams.prefix is not None: config.generator.prefix = hparams.prefix config.label_smoothing = hparams.label_smoothing hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator) if hparams.distributed_retriever == "pytorch": retriever = RagPyTorchDistributedRetriever.from_pretrained(hparams.model_name_or_path, config=config) elif hparams.distributed_retriever == "ray": # The Ray retriever needs the handles to the retriever actors. retriever = RagRayDistributedRetriever.from_pretrained( hparams.model_name_or_path, hparams.actor_handles, config=config ) model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever) prefix = config.question_encoder.prefix else: if hparams.prefix is not None: config.prefix = hparams.prefix hparams, config = set_extra_model_params(extra_model_params, hparams, config) model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config) prefix = config.prefix tokenizer = ( RagTokenizer.from_pretrained(hparams.model_name_or_path) if self.is_rag_model else AutoTokenizer.from_pretrained(hparams.model_name_or_path) ) super().__init__(hparams, config=config, tokenizer=tokenizer, model=model) save_git_info(self.hparams.output_dir) self.output_dir = Path(self.hparams.output_dir) self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.dataset_kwargs: dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": prefix or "", } n_observations_per_split = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.target_lens = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" self.hparams.git_sha = 
get_git_info()["repo_sha"] self.num_workers = hparams.num_workers self.distributed_port = self.hparams.distributed_port # For single GPU training, init_ddp_connection is not called. # So we need to initialize the retrievers here. if hparams.gpus <= 1: if hparams.distributed_retriever == "ray": self.model.retriever.init_retrieval() elif hparams.distributed_retriever == "pytorch": self.model.retriever.init_retrieval(self.distributed_port) self.distributed_retriever = hparams.distributed_retriever def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) def ids_to_clean_text(self, generated_ids: List[int]): gen_text = self.tokenizer.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return lmap(str.strip, gen_text) def _step(self, batch: dict) -> Tuple: source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] rag_kwargs = {} if isinstance(self.model, T5ForConditionalGeneration): decoder_input_ids = self.model._shift_right(target_ids) lm_labels = target_ids elif isinstance(self.model, BartForConditionalGeneration): decoder_input_ids = target_ids[:, :-1].contiguous() lm_labels = target_ids[:, 1:].clone() else: assert self.is_rag_model generator = self.model.rag.generator if isinstance(generator, T5ForConditionalGeneration): decoder_start_token_id = generator.config.decoder_start_token_id decoder_input_ids = ( torch.cat( [torch.tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids], dim=1, ) if target_ids.shape[0] < self.target_lens["train"] else generator._shift_right(target_ids) ) elif isinstance(generator, BartForConditionalGeneration): decoder_input_ids = target_ids lm_labels = decoder_input_ids rag_kwargs["reduce_loss"] = True assert decoder_input_ids is not None outputs = self( source_ids, attention_mask=source_mask, decoder_input_ids=decoder_input_ids, use_cache=False, labels=lm_labels, **rag_kwargs, ) loss = outputs["loss"] return (loss,) @property def pad(self) -> int: raise NotImplementedError("pad not implemented") def training_step(self, batch, batch_idx) -> Dict: loss_tensors = self._step(batch) logs = {name: loss.detach() for name, loss in zip(self.loss_names, loss_tensors)} # tokens per batch tgt_pad_token_id = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer.pad_token_id ) src_pad_token_id = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer.pad_token_id ) logs["tpb"] = ( batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum() ) return {"loss": loss_tensors[0], "log": logs} def validation_step(self, batch, batch_idx) -> Dict: return self._generative_step(batch) def validation_epoch_end(self, outputs, prefix="val") -> Dict: self.step_count += 1 losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} loss = losses["loss"] gen_metrics = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] } metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss) gen_metrics.update({k: v.item() for k, v in losses.items()}) # fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424 if dist.is_initialized(): dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM) metrics_tensor = metrics_tensor / dist.get_world_size() gen_metrics.update({self.val_metric: metrics_tensor.item()}) 
losses.update(gen_metrics) metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} metrics["step_count"] = self.step_count self.save_metrics(metrics, prefix) # writes to self.metrics_save_path preds = flatten_list([x["preds"] for x in outputs]) return {"log": metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metrics_tensor} def save_metrics(self, latest_metrics, type_path) -> None: self.metrics[type_path].append(latest_metrics) save_json(self.metrics, self.metrics_save_path) def calc_generative_metrics(self, preds, target) -> Dict: return calculate_exact_match(preds, target) def _generative_step(self, batch: dict) -> dict: start_time = time.time() batch = BatchEncoding(batch).to(device=self.model.device) generated_ids = self.model.generate( batch["input_ids"], attention_mask=batch["attention_mask"], do_deduplication=False, # rag specific parameter use_cache=True, min_length=1, max_length=self.target_lens["val"], ) gen_time = (time.time() - start_time) / batch["input_ids"].shape[0] preds: List[str] = self.ids_to_clean_text(generated_ids) target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"]) loss_tensors = self._step(batch) base_metrics = dict(zip(self.loss_names, loss_tensors)) gen_metrics: Dict = self.calc_generative_metrics(preds, target) summ_len = np.mean(lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics) return base_metrics def test_step(self, batch, batch_idx): return self._generative_step(batch) def test_epoch_end(self, outputs): return self.validation_epoch_end(outputs, prefix="test") def get_dataset(self, type_path) -> Seq2SeqDataset: n_obs = self.n_obs[type_path] max_target_length = self.target_lens[type_path] dataset = Seq2SeqDataset( self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path) dataloader = DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, ) return dataloader def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) return dataloader def val_dataloader(self) -> DataLoader: return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) def test_dataloader(self) -> DataLoader: return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count)) self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", default=25, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
), ) parser.add_argument( "--val_max_target_length", default=25, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--test_max_target_length", default=25, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) parser.add_argument( "--prefix", type=str, default=None, help="Prefix added at the beginning of each text, typically used with T5-based models.", ) parser.add_argument( "--early_stopping_patience", type=int, default=-1, required=False, help=( "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" " val_check_interval will effect it." ), ) parser.add_argument( "--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training." ) parser.add_argument( "--model_type", choices=["rag_sequence", "rag_token", "bart", "t5"], type=str, help=( "RAG model type: sequence or token, if none specified, the type is inferred from the" " model_name_or_path" ), ) return parser @staticmethod def add_retriever_specific_args(parser): parser.add_argument( "--index_name", type=str, default=None, help=( "Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom'" " for a local index, or 'legacy' for the orignal one)" ), ) parser.add_argument( "--passages_path", type=str, default=None, help=( "Path to the dataset of passages for custom index. More info about custom indexes in the RagRetriever" " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" ), ) parser.add_argument( "--index_path", type=str, default=None, help=( "Path to the faiss index for custom index. More info about custom indexes in the RagRetriever" " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" ), ) parser.add_argument( "--distributed_retriever", choices=["ray", "pytorch"], type=str, default="pytorch", help=( "What implementation to use for distributed retriever? If " "pytorch is selected, the index is loaded on training " "worker 0, and torch.distributed is used to handle " "communication between training worker 0, and the other " "training workers. If ray is selected, the Ray library is " "used to create load the index on separate processes, " "and Ray handles the communication between the training " "workers and the retrieval actors." ), ) parser.add_argument( "--use_dummy_dataset", type=bool, default=False, help=( "Whether to use the dummy version of the dataset index. More info about custom indexes in the" " RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" ), ) return parser @staticmethod def add_ray_specific_args(parser): # Ray cluster address. parser.add_argument( "--ray-address", default="auto", type=str, help=( "The address of the Ray cluster to connect to. 
If not " "specified, Ray will attempt to automatically detect the " "cluster. Has no effect if pytorch is used as the distributed " "retriever." ), ) parser.add_argument( "--num_retrieval_workers", type=int, default=1, help=( "The number of retrieval actors to use when Ray is selected " "for the distributed retriever. Has no effect when " "distributed_retriever is set to pytorch." ), ) return parser def main(args=None, model=None) -> GenerativeQAModule: parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) parser = GenerativeQAModule.add_retriever_specific_args(parser) args = args or parser.parse_args() Path(args.output_dir).mkdir(exist_ok=True) named_actors = [] if args.distributed_retriever == "ray" and args.gpus > 1: if not is_ray_available(): raise RuntimeError("Please install Ray to use the Ray distributed retriever.") # Connect to an existing Ray cluster. try: ray.init(address=args.ray_address, namespace="rag") except (ConnectionError, ValueError): logger.warning( "Connection to Ray cluster failed. Make sure a Ray " "cluster is running by either using Ray's cluster " "launcher (`ray up`) or by manually starting Ray on " "each node via `ray start --head` for the head node " "and `ray start --address='<ip address>:6379'` for " "additional nodes. See " "https://docs.ray.io/en/master/cluster/index.html " "for more info." ) raise # Create Ray actors only for rank 0. if ("LOCAL_RANK" not in os.environ or int(os.environ["LOCAL_RANK"]) == 0) and ( "NODE_RANK" not in os.environ or int(os.environ["NODE_RANK"]) == 0 ): remote_cls = ray.remote(RayRetriever) named_actors = [ remote_cls.options(name="retrieval_worker_{}".format(i)).remote() for i in range(args.num_retrieval_workers) ] else: logger.info( "Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format( os.environ["NODE_RANK"], os.environ["LOCAL_RANK"] ) ) named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)] args.actor_handles = named_actors assert args.actor_handles == named_actors if model is None: model: GenerativeQAModule = GenerativeQAModule(args) dataset = Path(args.data_dir).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir).startswith("/tmp") or str(args.output_dir).startswith("/var") ): training_logger = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger project = os.environ.get("WANDB_PROJECT", dataset) training_logger = WandbLogger(name=model.output_dir.name, project=project) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") es_callback = ( get_early_stopping_callback(model.val_metric, args.early_stopping_patience) if args.early_stopping_patience >= 0 else False ) trainer: pl.Trainer = generic_train( model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), early_stopping_callback=es_callback, logger=training_logger, custom_ddp_plugin=CustomDDP() if args.gpus > 1 else None, profiler=pl.profiler.AdvancedProfiler() if args.profile else None, ) pickle_save(model.hparams, model.output_dir / "hparams.pkl") if not args.do_predict: return model # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": 
parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) parser = GenerativeQAModule.add_retriever_specific_args(parser) parser = GenerativeQAModule.add_ray_specific_args(parser) # Pytorch Lightning Profiler parser.add_argument( "--profile", action="store_true", help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.", ) args = parser.parse_args() main(args)
transformers/examples/research_projects/rag/finetune_rag.py/0
{ "file_path": "transformers/examples/research_projects/rag/finetune_rag.py", "repo_id": "transformers", "token_count": 11834 }
327
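The named-actor handshake that `main()` performs (create the retrieval actors on rank 0, look them up by name everywhere else) can be reproduced in isolation with the public Ray API; a minimal sketch, with a placeholder actor class standing in for `RayRetriever`:

```python
import ray


@ray.remote
class Worker:  # placeholder for RayRetriever
    def ping(self):
        return "pong"


# Use an explicit namespace so named actors are discoverable, as the script
# does with ray.init(address=..., namespace="rag").
ray.init(namespace="rag")

# Rank 0 creates the actors under stable names...
workers = [Worker.options(name=f"retrieval_worker_{i}").remote() for i in range(2)]

# ...and any other process in the same namespace can fetch them by name.
handle = ray.get_actor("retrieval_worker_0")
print(ray.get(handle.ping.remote()))  # -> "pong"
```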
# Script for verifying that run_bart_sum can be invoked from its directory

# Get tiny dataset with cnn_dm format (4 examples for train, val, test)
wget https://cdn-datasets.huggingface.co/summarization/cnn_tiny.tgz
tar -xzvf cnn_tiny.tgz
rm cnn_tiny.tgz

export OUTPUT_DIR_NAME=bart_utest_output
export CURRENT_DIR=${PWD}
export OUTPUT_DIR=${CURRENT_DIR}/${OUTPUT_DIR_NAME}

# Make output directory if it doesn't exist
mkdir -p $OUTPUT_DIR

# Add parent directory to python path to access lightning_base.py and testing_utils.py
export PYTHONPATH="../":"${PYTHONPATH}"
python finetune.py \
    --data_dir=cnn_tiny/ \
    --model_name_or_path=sshleifer/bart-tiny-random \
    --learning_rate=3e-5 \
    --train_batch_size=2 \
    --eval_batch_size=2 \
    --output_dir=$OUTPUT_DIR \
    --num_train_epochs=1 \
    --gpus=0 \
    --do_train "$@"

rm -rf cnn_tiny
rm -rf $OUTPUT_DIR
transformers/examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh", "repo_id": "transformers", "token_count": 333 }
328
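When the smoke test needs to run from a Python harness rather than the shell, a thin wrapper along these lines is enough; the script name is the one above, everything else is an assumption about the caller's environment.

```python
import subprocess

# Run the smoke-test script and surface its output; check=False so we can
# attach the stderr tail to the raised error ourselves.
result = subprocess.run(
    ["bash", "finetune_bart_tiny.sh"],
    capture_output=True,
    text=True,
    check=False,
)
print(result.stdout[-2000:])  # tail of the training log
if result.returncode != 0:
    raise RuntimeError(f"smoke test failed:\n{result.stderr[-2000:]}")
```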
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for tapex on table-based question answering tasks. Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py """ import logging import os import sys from collections import defaultdict from copy import deepcopy from dataclasses import dataclass, field from functools import partial from typing import List, Optional import nltk # Here to have a nice missing dependency error message early on import numpy as np import pandas as pd from datasets import load_dataset from filelock import FileLock from wikisql_utils import _TYPE_CONVERTER, retrieve_wikisql_query_answer_tapas import transformers from transformers import ( AutoConfig, BartForConditionalGeneration, DataCollatorForSeq2Seq, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, TapexTokenizer, set_seed, ) from transformers.file_utils import is_offline_mode from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={ "help": ( "Pretrained tokenizer name or path if not the same as model_name. " "By default we use BART-large tokenizer for TAPEX-large." ) }, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default="wikisql", metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus # we should disable it here to avoid problematic generation config.no_repeat_ngram_size = 0 config.max_length = 1024 config.early_stopping = False # load tapex tokenizer tokenizer = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, add_prefix_space=True, ) # load Bart based Tapex model (default tapex-large) model = BartForConditionalGeneration.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = datasets["train"].column_names elif training_args.do_eval: column_names = datasets["validation"].column_names elif training_args.do_predict: column_names = datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" ) def preprocess_tableqa_function(examples, is_training=False): """ The is_training FLAG is used to identify if we could use the supervision to truncate the table content if it is required. 
""" # this function is specific for WikiSQL since the util function need the data structure # to retrieve the WikiSQL answer for each question def _convert_table_types(_table): """Runs the type converter over the table cells.""" ret_table = deepcopy(_table) types = ret_table["types"] ret_table["real_rows"] = ret_table["rows"] typed_rows = [] for row in ret_table["rows"]: typed_row = [] for column, cell_value in enumerate(row): typed_row.append(_TYPE_CONVERTER[types[column]](cell_value)) typed_rows.append(typed_row) ret_table["rows"] = typed_rows return ret_table questions = [question.lower() for question in examples["question"]] example_tables = examples["table"] example_sqls = examples["sql"] tables = [ pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"]) for example_table in example_tables ] # using tapas utils to obtain wikisql answer answers = [] for example_sql, example_table in zip(example_sqls, example_tables): tapas_table = _convert_table_types(example_table) answer_list: List[str] = retrieve_wikisql_query_answer_tapas(tapas_table, example_sql) # you can choose other delimiters to split each answer answers.append(answer_list) # IMPORTANT: we cannot pass by answers during evaluation, answers passed during training are used to # truncate large tables in the train set! if is_training: model_inputs = tokenizer( table=tables, query=questions, answer=answers, max_length=data_args.max_source_length, padding=padding, truncation=True, ) else: model_inputs = tokenizer( table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True ) labels = tokenizer( answer=[", ".join(answer) for answer in answers], max_length=max_target_length, padding=padding, truncation=True, ) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs # in training, we can use the answer as extra information to truncate large tables preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( preprocess_tableqa_function_training, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_tableqa_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: max_target_length = data_args.val_max_target_length if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = datasets["test"] if data_args.max_predict_samples is not None: predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) predict_dataset = predict_dataset.map( preprocess_tableqa_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] return preds, labels def compute_metrics(eval_preds): preds, labels = eval_preds if isinstance(preds, tuple): preds = preds[0] decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) if data_args.ignore_pad_token_for_loss: # Replace -100 in the labels as we can't decode them. 
labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) delimiter = ", " # define example evaluation def evaluate_example(predict_str: str, ground_str: str): predict_spans = predict_str.split(delimiter) ground_spans = ground_str.split(delimiter) predict_values = defaultdict(lambda: 0) ground_values = defaultdict(lambda: 0) for span in predict_spans: try: predict_values[float(span)] += 1 except ValueError: predict_values[span.strip()] += 1 for span in ground_spans: try: ground_values[float(span)] += 1 except ValueError: ground_values[span.strip()] += 1 is_correct = predict_values == ground_values return is_correct def get_denotation_accuracy(predictions: List[str], references: List[str]): assert len(predictions) == len(references) correct_num = 0 for predict_str, ground_str in zip(predictions, references): is_correct = evaluate_example(predict_str.lower(), ground_str.lower()) if is_correct: correct_num += 1 return correct_num / len(predictions) accuracy = get_denotation_accuracy(decoded_preds, decoded_labels) result = {"denotation_accuracy": accuracy} return result # Initialize our Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate( max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval" ) max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") predict_results = trainer.predict( predict_dataset, metric_key_prefix="predict", max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, ) metrics = predict_results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if trainer.is_world_process_zero(): if training_args.predict_with_generate: predictions = tokenizer.batch_decode( predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) predictions = [pred.strip() for pred in predictions] 
output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt") with open(output_prediction_file, "w") as writer: writer.write("\n".join(predictions)) return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/research_projects/tapex/run_wikisql_with_tapex.py/0
{ "file_path": "transformers/examples/research_projects/tapex/run_wikisql_with_tapex.py", "repo_id": "transformers", "token_count": 11008 }
329
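The table/question encoding that the script delegates to `TapexTokenizer` looks like this in isolation; a minimal sketch assuming the public `microsoft/tapex-base` checkpoint (not the WikiSQL fine-tune configured above):

```python
import pandas as pd

from transformers import BartForConditionalGeneration, TapexTokenizer

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base")

table = pd.DataFrame.from_dict(
    {"city": ["Paris", "Berlin"], "population": ["2.1 million", "3.6 million"]}
)
question = "what is the population of paris?"

# The tokenizer linearizes the table row by row and prepends the query,
# mirroring the preprocess_tableqa_function above.
encoding = tokenizer(table=table, query=question, return_tensors="pt")
outputs = model.generate(**encoding)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```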
import importlib

import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel


def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, [_, _, indices] = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
transformers/examples/research_projects/vqgan-clip/loaders.py/0
{ "file_path": "transformers/examples/research_projects/vqgan-clip/loaders.py", "repo_id": "transformers", "token_count": 926 }
330
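`instantiate_from_config` is a generic dotted-path factory; it works on any importable callable, not just taming models. A self-contained sketch with a stdlib class standing in for `VQModel`:

```python
import importlib


def get_obj_from_str(string):
    # Resolve "package.module.ClassName" to the class object.
    module, cls = string.rsplit(".", 1)
    return getattr(importlib.import_module(module), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


# datetime.timedelta stands in for a real model class here.
config = {"target": "datetime.timedelta", "params": {"days": 2, "hours": 3}}
print(instantiate_from_config(config))  # -> 2 days, 3:00:00
```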
#!/usr/bin/env python3 import logging import pathlib import re import sys from dataclasses import dataclass, field from typing import Any, Callable, Dict, List, Optional, Set, Union import datasets import librosa import numpy as np import torch from lang_trans import arabic from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC, Wav2Vec2Processor, is_apex_available, trainer_utils, ) if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) verbose_logging: Optional[bool] = field( default=False, metadata={"help": "Whether to log verbose messages or not."}, ) def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logging_level = logging.WARNING if model_args.verbose_logging: logging_level = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank): logging_level = logging.INFO logger.setLevel(logging_level) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) validation_split_name: Optional[str] = field( default="validation", metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) target_text_column: Optional[str] = field( default="text", metadata={"help": "Column in the dataset that contains label (target text). Defaults to 'text'"}, ) speech_file_column: Optional[str] = field( default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, ) target_feature_extractor_sampling_rate: Optional[bool] = field( default=False, metadata={"help": "Resample loaded audio to target feature extractor's sampling rate or not."}, ) max_duration_in_seconds: Optional[float] = field( default=None, metadata={"help": "Filters out examples longer than specified. 
Defaults to no filtering."}, ) orthography: Optional[str] = field( default="librispeech", metadata={ "help": ( "Orthography used for normalization and tokenization: 'librispeech' (default), 'timit', or" " 'buckwalter'." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) @dataclass class Orthography: """ Orthography scheme used for text normalization and tokenization. Args: do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to accept lowercase input and lowercase the output when decoding. vocab_file (:obj:`str`, `optional`): File containing the vocabulary. word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`): The token used for delimiting words; it needs to be in the vocabulary. translation_table (:obj:`Dict[str, str]`, `optional`, defaults to :obj:`{}`): Table to use with `str.translate()` when preprocessing text (e.g., "-" -> " "). words_to_remove (:obj:`Set[str]`, `optional`, defaults to :obj:`set()`): Words to remove when preprocessing text (e.g., "sil"). untransliterator (:obj:`Callable[[str], str]`, `optional`): Function that untransliterates text back into native writing system. """ do_lower_case: bool = False vocab_file: Optional[str] = None word_delimiter_token: Optional[str] = "|" translation_table: Optional[Dict[str, str]] = field(default_factory=dict) words_to_remove: Optional[Set[str]] = field(default_factory=set) untransliterator: Optional[Callable[[str], str]] = None @classmethod def from_name(cls, name: str): if name == "librispeech": return cls() if name == "timit": return cls( do_lower_case=True, # break compounds like "quarter-century-old" and replace pauses "--" translation_table=str.maketrans({"-": " "}), ) if name == "buckwalter": translation_table = { "-": " ", # sometimes used to represent pauses "^": "v", # fixing "tha" in arabic_speech_corpus dataset } return cls( vocab_file=pathlib.Path(__file__).parent.joinpath("vocab/buckwalter.json"), word_delimiter_token="/", # "|" is Arabic letter alef with madda above translation_table=str.maketrans(translation_table), words_to_remove={"sil"}, # fixing "sil" in arabic_speech_corpus dataset untransliterator=arabic.buckwalter.untransliterate, ) raise ValueError(f"Unsupported orthography: '{name}'.") def preprocess_for_training(self, text: str) -> str: # TODO(elgeish) return a pipeline (e.g., from jiwer) instead? 
Or rely on branch predictor as is
        if len(self.translation_table) > 0:
            text = text.translate(self.translation_table)
        if len(self.words_to_remove) == 0:
            text = " ".join(text.split())  # clean up whitespaces
        else:
            text = " ".join(w for w in text.split() if w not in self.words_to_remove)  # and clean up whitespaces
        return text

    def create_processor(self, model_args: ModelArguments) -> Wav2Vec2Processor:
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            model_args.model_name_or_path, cache_dir=model_args.cache_dir
        )
        if self.vocab_file:
            tokenizer = Wav2Vec2CTCTokenizer(
                self.vocab_file,
                cache_dir=model_args.cache_dir,
                do_lower_case=self.do_lower_case,
                word_delimiter_token=self.word_delimiter_token,
            )
        else:
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(
                model_args.model_name_or_path,
                cache_dir=model_args.cache_dir,
                do_lower_case=self.do_lower_case,
                word_delimiter_token=self.word_delimiter_token,
            )
        return Wav2Vec2Processor(feature_extractor, tokenizer)


@dataclass
class DataCollatorCTCWithPadding:
    """
    Data collator that will dynamically pad the inputs received.
    Args:
        processor (:class:`~transformers.Wav2Vec2Processor`)
            The processor used for processing the data.
        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`):
            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
            among:
            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a
              single sequence is provided).
            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
              maximum acceptable input length for the model if that argument is not provided.
            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
              different lengths).
        max_length (:obj:`int`, `optional`):
            Maximum length of the ``input_values`` of the returned list and optionally padding length (see above).
        max_length_labels (:obj:`int`, `optional`):
            Maximum length of the ``labels`` returned list and optionally padding length (see above).
        pad_to_multiple_of (:obj:`int`, `optional`):
            If set will pad the sequence to a multiple of the provided value.
            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
            7.5 (Volta).
""" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) labels_batch = self.processor.pad( labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))

    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    orthography = Orthography.from_name(data_args.orthography.lower())
    processor = orthography.create_processor(model_args)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
        vocab_size=len(processor.tokenizer),
    )

    train_dataset = datasets.load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name
    )
    val_dataset = datasets.load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.validation_split_name
    )

    wer_metric = datasets.load_metric("wer")
    target_sr = processor.feature_extractor.sampling_rate if data_args.target_feature_extractor_sampling_rate else None
    vocabulary_chars_str = "".join(t for t in processor.tokenizer.get_vocab().keys() if len(t) == 1)
    vocabulary_text_cleaner = re.compile(  # remove characters not in vocabulary
        rf"[^\s{re.escape(vocabulary_chars_str)}]",  # allow space in addition to chars in vocabulary
        flags=re.IGNORECASE if processor.tokenizer.do_lower_case else 0,
    )
    text_updates = []

    def prepare_example(example):  # TODO(elgeish) make use of multiprocessing?
        example["speech"], example["sampling_rate"] = librosa.load(example[data_args.speech_file_column], sr=target_sr)
        if data_args.max_duration_in_seconds is not None:
            example["duration_in_seconds"] = len(example["speech"]) / example["sampling_rate"]
        # Normalize and clean up text; order matters!
        updated_text = orthography.preprocess_for_training(example[data_args.target_text_column])
        updated_text = vocabulary_text_cleaner.sub("", updated_text)
        if updated_text != example[data_args.target_text_column]:
            text_updates.append((example[data_args.target_text_column], updated_text))
            example[data_args.target_text_column] = updated_text
        return example

    train_dataset = train_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column])
    val_dataset = val_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column])

    if data_args.max_duration_in_seconds is not None:

        def filter_by_max_duration(example):
            return example["duration_in_seconds"] <= data_args.max_duration_in_seconds

        old_train_size = len(train_dataset)
        old_val_size = len(val_dataset)
        train_dataset = train_dataset.filter(filter_by_max_duration, remove_columns=["duration_in_seconds"])
        val_dataset = val_dataset.filter(filter_by_max_duration, remove_columns=["duration_in_seconds"])
        if len(train_dataset) < old_train_size:
            logger.warning(
                f"Filtered out {old_train_size - len(train_dataset)} train example(s) longer than"
                f" {data_args.max_duration_in_seconds} second(s)."
            )
        if len(val_dataset) < old_val_size:
            logger.warning(
                f"Filtered out {old_val_size - len(val_dataset)} validation example(s) longer than"
                f" {data_args.max_duration_in_seconds} second(s)."
) logger.info(f"Split sizes: {len(train_dataset)} train and {len(val_dataset)} validation.") logger.warning(f"Updated {len(text_updates)} transcript(s) using '{data_args.orthography}' orthography rules.") if logger.isEnabledFor(logging.DEBUG): for original_text, updated_text in text_updates: logger.debug(f'Updated text: "{original_text}" -> "{updated_text}"') text_updates = None def prepare_dataset(batch): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." processed_batch = processor( audio=batch["speech"], text=batch[data_args.target_text_column], sampling_rate=batch["sampling_rate"][0] ) batch.update(processed_batch) return batch train_dataset = train_dataset.map( prepare_dataset, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) val_dataset = val_dataset.map( prepare_dataset, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) if logger.isEnabledFor(logging.DEBUG): for reference, predicted in zip(label_str, pred_str): logger.debug(f'reference: "{reference}"') logger.debug(f'predicted: "{predicted}"') if orthography.untransliterator is not None: logger.debug(f'reference (untransliterated): "{orthography.untransliterator(reference)}"') logger.debug(f'predicted (untransliterated): "{orthography.untransliterator(predicted)}"') wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() trainer = CTCTrainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=processor.feature_extractor, ) trainer.train() if __name__ == "__main__": main()
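The `-100` masking inside `DataCollatorCTCWithPadding.__call__` earlier in this script is the step that keeps padded label positions out of the CTC loss. Here is a minimal, hedged sketch of just that step; the tensors are invented stand-ins for a real padded batch, and only `torch` is assumed:

```python
import torch

# Two label sequences padded to length 4, plus the attention mask the tokenizer returns.
padded_labels = torch.tensor([[5, 9, 2, 0], [7, 3, 0, 0]])
attention_mask = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])

# Wherever the mask is not 1 the position is padding; writing -100 there tells
# the loss function to ignore it.
labels = padded_labels.masked_fill(attention_mask.ne(1), -100)
print(labels)
# tensor([[   5,    9,    2, -100],
#         [   7,    3, -100, -100]])
```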
transformers/examples/research_projects/wav2vec2/run_asr.py/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/run_asr.py", "repo_id": "transformers", "token_count": 8256 }
331
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Multiple-choice training (e.g. SWAG)

This folder contains the `run_swag.py` script, showing an example of *multiple-choice answering* with the
🤗 Transformers library. For straightforward use-cases you may be able to use this script without modification,
although we have also included comments in the code to indicate areas that you may need to adapt to your own projects.

### Multi-GPU and TPU usage

By default, the script uses a `MirroredStrategy` and will use multiple GPUs effectively if they are available. TPUs
can also be used by passing the name of the TPU resource with the `--tpu` argument; a sample TPU invocation is shown
after the example command below.

### Memory usage and data loading

Note that this script loads all data into memory. Most multiple-choice datasets are small enough that this is not an
issue, but if you have a very large dataset you will need to modify the script to handle data streaming. This is
particularly challenging for TPUs, given the stricter requirements and the sheer volume of data required to keep them
fed. A full explanation of all the possible pitfalls is beyond the scope of this example script and README, but for
more information you can see the 'Input Datasets' section of
[this document](https://www.tensorflow.org/guide/tpu).

### Example command

```bash
python run_swag.py \
 --model_name_or_path distilbert/distilbert-base-cased \
 --output_dir output \
 --do_eval \
 --do_train
```
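As a hedged example of the TPU option described above, the same command can be pointed at a TPU by adding the
`--tpu` flag; `my-tpu` is a placeholder for the name of your TPU resource:

```bash
python run_swag.py \
 --model_name_or_path distilbert/distilbert-base-cased \
 --output_dir output \
 --do_eval \
 --do_train \
 --tpu my-tpu
```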
transformers/examples/tensorflow/multiple-choice/README.md/0
{ "file_path": "transformers/examples/tensorflow/multiple-choice/README.md", "repo_id": "transformers", "token_count": 513 }
332
#!/usr/bin/env python
# coding: utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab and merges file, thus also resulting in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated.
#
#
# It will then be used as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)

config.update({
    "d_model": 4,
    "encoder_layers": 1,
    "decoder_layers": 1,
    "encoder_ffn_dim": 4,
    "decoder_ffn_dim": 4,
    "encoder_attention_heads": 1,
    "decoder_attention_heads": 1,
})

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
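As a hedged usage sketch (not part of the script itself): once the script has run, the saved checkpoint reloads like
any other FSMT model. `tiny-wmt19-en-de` is the local directory created by the save step above; the cast back to
float is needed because the weights were halved before saving.

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("tiny-wmt19-en-de")
tiny_model = FSMTForConditionalGeneration.from_pretrained("tiny-wmt19-en-de")
tiny_model = tiny_model.float()  # weights were saved in fp16; cast back for CPU inference

batch = tokenizer(["Hello world"], return_tensors="pt")
outputs = tiny_model(**batch)
print(outputs.logits.shape)  # (1, sequence_length, vocab_size), with d_model reduced to 4
```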
transformers/scripts/fsmt/fsmt-make-tiny-model.py/0
{ "file_path": "transformers/scripts/fsmt/fsmt-make-tiny-model.py", "repo_id": "transformers", "token_count": 704 }
333
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import logging import re from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union from .. import is_torch_available from ..utils import logging as transformers_logging from ..utils.import_utils import is_pygments_available from .agent_types import AgentAudio, AgentImage, AgentText from .default_tools import BASE_PYTHON_TOOLS, FinalAnswerTool, setup_default_tools from .llm_engine import HfApiEngine, MessageRole from .prompts import ( DEFAULT_CODE_SYSTEM_PROMPT, DEFAULT_REACT_CODE_SYSTEM_PROMPT, DEFAULT_REACT_JSON_SYSTEM_PROMPT, PLAN_UPDATE_FINAL_PLAN_REDACTION, PROMPTS_FOR_INITIAL_PLAN, PROMPTS_FOR_PLAN_UPDATE, SUPPORTED_PLAN_TYPES, SYSTEM_PROMPT_FACTS, SYSTEM_PROMPT_FACTS_UPDATE, USER_PROMPT_FACTS_UPDATE, ) from .python_interpreter import LIST_SAFE_MODULES, evaluate_python_code from .tools import ( DEFAULT_TOOL_DESCRIPTION_TEMPLATE, Tool, get_tool_description_with_args, load_tool, ) if is_pygments_available(): from pygments import highlight from pygments.formatters import Terminal256Formatter from pygments.lexers import PythonLexer class CustomFormatter(logging.Formatter): grey = "\x1b[38;20m" bold_yellow = "\x1b[33;1m" red = "\x1b[31;20m" green = "\x1b[32;20m" bold_red = "\x1b[31;1m" bold_white = "\x1b[37;1m" reset = "\x1b[0m" format = "%(message)s" FORMATS = { logging.DEBUG: grey + format + reset, logging.INFO: format, logging.WARNING: bold_yellow + format + reset, 31: reset + format + reset, 32: green + format + reset, 33: bold_white + format + reset, logging.ERROR: red + format + reset, logging.CRITICAL: bold_red + format + reset, } def format(self, record): log_fmt = self.FORMATS.get(record.levelno) formatter = logging.Formatter(log_fmt) return formatter.format(record) logger = transformers_logging.get_logger(__name__) logger.propagate = False ch = logging.StreamHandler() ch.setFormatter(CustomFormatter()) logger.addHandler(ch) def parse_json_blob(json_blob: str) -> Dict[str, str]: try: first_accolade_index = json_blob.find("{") last_accolade_index = [a.start() for a in list(re.finditer("}", json_blob))][-1] json_blob = json_blob[first_accolade_index : last_accolade_index + 1].replace('\\"', "'") json_data = json.loads(json_blob, strict=False) return json_data except json.JSONDecodeError as e: place = e.pos if json_blob[place - 1 : place + 2] == "},\n": raise ValueError( "JSON is invalid: you probably tried to provide multiple tool calls in one action. PROVIDE ONLY ONE TOOL CALL." ) raise ValueError( f"The JSON blob you used is invalid due to the following error: {e}.\n" f"JSON blob was: {json_blob}, decoding failed on that specific part of the blob:\n" f"'{json_blob[place-4:place+5]}'." 
) except Exception as e: raise ValueError(f"Error in parsing the JSON blob: {e}") def parse_code_blob(code_blob: str) -> str: try: pattern = r"```(?:py|python)?\n(.*?)\n```" match = re.search(pattern, code_blob, re.DOTALL) return match.group(1).strip() except Exception as e: raise ValueError( f""" The code blob you used is invalid: due to the following error: {e} This means that the regex pattern {pattern} was not respected: make sure to include code with the correct pattern, for instance: Thoughts: Your thoughts Code: ```py # Your python code here ```<end_action>""" ) def parse_json_tool_call(json_blob: str) -> Tuple[str, Dict[str, str]]: json_blob = json_blob.replace("```json", "").replace("```", "") tool_call = parse_json_blob(json_blob) if "action" in tool_call and "action_input" in tool_call: return tool_call["action"], tool_call["action_input"] elif "action" in tool_call: return tool_call["action"], None else: raise ValueError( f"Missing keys: {[key for key in ['action', 'action_input'] if key not in tool_call]} in blob {tool_call}" ) def parse_text_tool_call(text: str) -> Tuple[str, Union[str, Dict[str, str]]]: """ Expects a text in the format: 'Action:', 'Action input:', 'Observation:'. 'Action input:' contains a json string with input arguments. """ try: if "Observation:" in text: text = text.split("Observation:")[0] if "Action:" in text: text = text.split("Action:")[1] tool_name, tool_input = text.split("Action input:") if "{" in tool_input: tool_input = parse_json_blob(tool_input) else: tool_input = tool_input.strip().replace('"', "") return tool_name.strip().replace('"', "").replace("\\", ""), tool_input except Exception as e: raise ValueError( f"Error in parsing the text tool call: {e}. Be sure to provide the correct format. DO NOT repeat your previous incorrect tool call." ) def to_text(input: Union[List[Dict[str, str]], Dict[str, str], str]) -> str: if isinstance(input, list): return "\n".join([m["content"] for m in input]) elif isinstance(input, dict): return input["content"] else: return input HUGGINGFACE_DEFAULT_TOOLS = {} _tools_are_initialized = False class Toolbox: """ The toolbox contains all tools that the agent can perform operations with, as well as a few methods to manage them. Args: tools (`List[Tool]`): The list of tools to instantiate the toolbox with add_base_tools (`bool`, defaults to `False`, *optional*, defaults to `False`): Whether to add the tools available within `transformers` to the toolbox. """ def __init__(self, tools: List[Tool], add_base_tools: bool = False): self._tools = {tool.name: tool for tool in tools} if add_base_tools: self.add_base_tools() self._load_tools_if_needed() def add_base_tools(self, add_python_interpreter: bool = False): global _tools_are_initialized global HUGGINGFACE_DEFAULT_TOOLS if not _tools_are_initialized: HUGGINGFACE_DEFAULT_TOOLS = setup_default_tools(logger) _tools_are_initialized = True for tool in HUGGINGFACE_DEFAULT_TOOLS.values(): if tool.name != "python_interpreter" or add_python_interpreter: self.add_tool(tool) self._load_tools_if_needed() @property def tools(self) -> Dict[str, Tool]: """Get all tools currently in the toolbox""" return self._tools def show_tool_descriptions(self, tool_description_template: str = None) -> str: """ Returns the description of all tools in the toolbox Args: tool_description_template (`str`, *optional*): The template to use to describe the tools. If not provided, the default template will be used. 
""" return "\n".join( [get_tool_description_with_args(tool, tool_description_template) for tool in self._tools.values()] ) def add_tool(self, tool: Tool): """ Adds a tool to the toolbox Args: tool (`Tool`): The tool to add to the toolbox. """ if tool.name in self._tools: raise KeyError(f"Error: tool '{tool.name}' already exists in the toolbox.") self._tools[tool.name] = tool def remove_tool(self, tool_name: str): """ Removes a tool from the toolbox Args: tool_name (`str`): The tool to remove from the toolbox. """ if tool_name not in self._tools: raise KeyError( f"Error: tool {tool_name} not found in toolbox for removal, should be instead one of {list(self._tools.keys())}." ) del self._tools[tool_name] def update_tool(self, tool: Tool): """ Updates a tool in the toolbox according to its name. Args: tool (`Tool`): The tool to update to the toolbox. """ if tool.name not in self._tools: raise KeyError( f"Error: tool {tool.name} not found in toolbox for update, should be instead one of {list(self._tools.keys())}." ) self._tools[tool.name] = tool def clear_toolbox(self): """Clears the toolbox""" self._tools = {} def _load_tools_if_needed(self): for name, tool in self._tools.items(): if not isinstance(tool, Tool): task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id self._tools[name] = load_tool(task_or_repo_id) def __repr__(self): toolbox_description = "Toolbox contents:\n" for tool in self._tools.values(): toolbox_description += f"\t{tool.name}: {tool.description}\n" return toolbox_description class AgentError(Exception): """Base class for other agent-related exceptions""" def __init__(self, message): super().__init__(message) self.message = message class AgentParsingError(AgentError): """Exception raised for errors in parsing in the agent""" pass class AgentExecutionError(AgentError): """Exception raised for errors in execution in the agent""" pass class AgentMaxIterationsError(AgentError): """Exception raised for errors in execution in the agent""" pass class AgentGenerationError(AgentError): """Exception raised for errors in generation in the agent""" pass def format_prompt_with_tools(toolbox: Toolbox, prompt_template: str, tool_description_template: str) -> str: tool_descriptions = toolbox.show_tool_descriptions(tool_description_template) prompt = prompt_template.replace("<<tool_descriptions>>", tool_descriptions) if "<<tool_names>>" in prompt: tool_names = [f"'{tool_name}'" for tool_name in toolbox.tools.keys()] prompt = prompt.replace("<<tool_names>>", ", ".join(tool_names)) return prompt def format_prompt_with_imports(prompt_template: str, authorized_imports: List[str]) -> str: if "<<authorized_imports>>" not in prompt_template: raise AgentError("Tag '<<authorized_imports>>' should be provided in the prompt.") return prompt_template.replace("<<authorized_imports>>", str(authorized_imports)) class Agent: def __init__( self, tools: Union[List[Tool], Toolbox], llm_engine: Callable = HfApiEngine(), system_prompt=DEFAULT_REACT_CODE_SYSTEM_PROMPT, tool_description_template=None, additional_args={}, max_iterations: int = 6, tool_parser=parse_json_tool_call, add_base_tools: bool = False, verbose: int = 0, memory_verbose: bool = False, grammar: Dict[str, str] = None, ): self.agent_name = self.__class__.__name__ self.llm_engine = llm_engine self.system_prompt_template = system_prompt self.tool_description_template = ( tool_description_template if tool_description_template else DEFAULT_TOOL_DESCRIPTION_TEMPLATE ) self.additional_args = additional_args self.max_iterations = 
max_iterations self.logger = logger self.tool_parser = tool_parser self.grammar = grammar if isinstance(tools, Toolbox): self._toolbox = tools if add_base_tools: if not is_torch_available(): raise ImportError("Using the base tools requires torch to be installed.") self._toolbox.add_base_tools(add_python_interpreter=(self.__class__ == ReactJsonAgent)) else: self._toolbox = Toolbox(tools, add_base_tools=add_base_tools) self._toolbox.add_tool(FinalAnswerTool()) self.system_prompt = format_prompt_with_tools( self._toolbox, self.system_prompt_template, self.tool_description_template ) self.prompt = None self.logs = [] self.task = None self.memory_verbose = memory_verbose if verbose == 0: logger.setLevel(logging.WARNING) elif verbose == 1: logger.setLevel(logging.INFO) elif verbose == 2: logger.setLevel(logging.DEBUG) @property def toolbox(self) -> Toolbox: """Get the toolbox currently available to the agent""" return self._toolbox def initialize_for_run(self): self.token_count = 0 self.system_prompt = format_prompt_with_tools( self._toolbox, self.system_prompt_template, self.tool_description_template, ) if hasattr(self, "authorized_imports"): self.system_prompt = format_prompt_with_imports( self.system_prompt, list(set(LIST_SAFE_MODULES) | set(self.authorized_imports)) ) self.logs = [{"system_prompt": self.system_prompt, "task": self.task}] self.logger.warn("======== New task ========") self.logger.log(33, self.task) self.logger.debug("System prompt is as follows:") self.logger.debug(self.system_prompt) def write_inner_memory_from_logs(self, summary_mode: Optional[bool] = False) -> List[Dict[str, str]]: """ Reads past llm_outputs, actions, and observations or errors from the logs into a series of messages that can be used as input to the LLM. """ prompt_message = {"role": MessageRole.SYSTEM, "content": self.logs[0]["system_prompt"]} task_message = { "role": MessageRole.USER, "content": "Task: " + self.logs[0]["task"], } if summary_mode: memory = [task_message] else: memory = [prompt_message, task_message] for i, step_log in enumerate(self.logs[1:]): if "llm_output" in step_log and not summary_mode: thought_message = {"role": MessageRole.ASSISTANT, "content": step_log["llm_output"].strip()} memory.append(thought_message) if "facts" in step_log: thought_message = { "role": MessageRole.ASSISTANT, "content": "[FACTS LIST]:\n" + step_log["facts"].strip(), } memory.append(thought_message) if "plan" in step_log and not summary_mode: thought_message = {"role": MessageRole.ASSISTANT, "content": "[PLAN]:\n" + step_log["plan"].strip()} memory.append(thought_message) if "tool_call" in step_log and summary_mode: tool_call_message = { "role": MessageRole.ASSISTANT, "content": f"[STEP {i} TOOL CALL]: " + str(step_log["tool_call"]).strip(), } memory.append(tool_call_message) if "task" in step_log: tool_call_message = { "role": MessageRole.USER, "content": "New task:\n" + step_log["task"], } memory.append(tool_call_message) if "error" in step_log or "observation" in step_log: if "error" in step_log: message_content = ( f"[OUTPUT OF STEP {i}] Error: " + str(step_log["error"]) + "\nNow let's retry: take care not to repeat previous errors! 
If you have retried several times, try a completely different approach.\n" ) elif "observation" in step_log: message_content = f"[OUTPUT OF STEP {i}] Observation:\n{step_log['observation']}" tool_response_message = {"role": MessageRole.TOOL_RESPONSE, "content": message_content} memory.append(tool_response_message) return memory def get_succinct_logs(self): return [{key: value for key, value in log.items() if key != "agent_memory"} for log in self.logs] def extract_action(self, llm_output: str, split_token: str) -> str: """ Parse action from the LLM output Args: llm_output (`str`): Output of the LLM split_token (`str`): Separator for the action. Should match the example in the system prompt. """ try: split = llm_output.split(split_token) rationale, action = ( split[-2], split[-1], ) # NOTE: using indexes starting from the end solves for when you have more than one split_token in the output except Exception as e: self.logger.error(e, exc_info=1) raise AgentParsingError( f"Error: No '{split_token}' token provided in your output.\nYour output:\n{llm_output}\n. Be sure to include an action, prefaced with '{split_token}'!" ) return rationale, action def execute_tool_call(self, tool_name: str, arguments: Dict[str, str]) -> Any: """ Execute tool with the provided input and returns the result. This method replaces arguments with the actual values from the state if they refer to state variables. Args: tool_name (`str`): Name of the Tool to execute (should be one from self.toolbox). arguments (Dict[str, str]): Arguments passed to the Tool. """ if tool_name not in self.toolbox.tools: error_msg = f"Error: unknown tool {tool_name}, should be instead one of {list(self.toolbox.tools.keys())}." self.logger.error(error_msg, exc_info=1) raise AgentExecutionError(error_msg) try: if isinstance(arguments, str): observation = self.toolbox.tools[tool_name](arguments) else: for key, value in arguments.items(): # if the value is the name of a state variable like "image.png", replace it with the actual value if isinstance(value, str) and value in self.state: arguments[key] = self.state[value] observation = self.toolbox.tools[tool_name](**arguments) return observation except Exception as e: raise AgentExecutionError( f"Error in tool call execution: {e}\nYou should only use this tool with a correct input.\n" f"As a reminder, this tool's description is the following:\n{get_tool_description_with_args(self.toolbox.tools[tool_name])}" ) def log_code_action(self, code_action: str) -> None: self.logger.warning("==== Agent is executing the code below:") if is_pygments_available(): self.logger.log( 31, highlight(code_action, PythonLexer(ensurenl=False), Terminal256Formatter(style="nord")) ) else: self.logger.log(31, code_action) self.logger.warning("====") def run(self, **kwargs): """To be implemented in the child class""" raise NotImplementedError class CodeAgent(Agent): """ A class for an agent that solves the given task using a single block of code. It plans all its actions, then executes all in one shot. 
""" def __init__( self, tools: List[Tool], llm_engine: Callable = HfApiEngine(), system_prompt: str = DEFAULT_CODE_SYSTEM_PROMPT, tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str] = None, additional_authorized_imports: Optional[List[str]] = None, **kwargs, ): super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, **kwargs, ) if not is_pygments_available(): transformers_logging.warning_once( logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the " "CodeAgent.", ) self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports)) def parse_code_blob(self, result: str) -> str: """ Override this method if you want to change the way the code is cleaned in the `run` method. """ return parse_code_blob(result) def run(self, task: str, return_generated_code: bool = False, **kwargs): """ Runs the agent for the given task. Args: task (`str`): The task to perform return_generated_code (`bool`, *optional*, defaults to `False`): Whether to return the generated code instead of running it kwargs (additional keyword arguments, *optional*): Any keyword argument to send to the agent when evaluating the code. Example: ```py from transformers.agents import CodeAgent, PythonInterpreterTool python_interpreter = PythonInterpreterTool() agent = CodeAgent(tools=[python_interpreter]) agent.run("What is the result of 2 power 3.7384?") ``` """ self.task = task if len(kwargs) > 0: self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}." self.state = kwargs.copy() self.initialize_for_run() # Run LLM prompt_message = {"role": MessageRole.SYSTEM, "content": self.system_prompt} task_message = { "role": MessageRole.USER, "content": "Task: " + self.task, } self.prompt = [prompt_message, task_message] self.logger.info("====Executing with this prompt====") self.logger.info(self.prompt) additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine(self.prompt, stop_sequences=["<end_action>"], **additional_args) if return_generated_code: return llm_output # Parse try: _, code_action = self.extract_action(llm_output=llm_output, split_token="Code:") except Exception as e: self.logger.debug( f"Error in extracting action, trying to parse the whole output as code. Error trace: {e}" ) code_action = llm_output try: code_action = self.parse_code_blob(code_action) except Exception as e: error_msg = f"Error in code parsing: {e}. Be sure to provide correct code" self.logger.error(error_msg, exc_info=1) return error_msg # Execute self.log_code_action(code_action) try: available_tools = {**BASE_PYTHON_TOOLS.copy(), **self.toolbox.tools} output = self.python_evaluator( code_action, static_tools=available_tools, custom_tools={}, state=self.state, authorized_imports=self.authorized_imports, ) self.logger.info(self.state["print_outputs"]) return output except Exception as e: error_msg = f"Error in execution: {e}. Be sure to provide correct code." 
        self.logger.error(error_msg, exc_info=1)
            return error_msg


class ReactAgent(Agent):
    """
    This agent solves the given task step by step, using the ReAct framework:
    While the objective is not reached, the agent will perform a cycle of thinking and acting.
    The action will be parsed from the LLM output: it consists of calls to tools from the toolbox, with arguments chosen by the LLM engine.
    """

    def __init__(
        self,
        tools: List[Tool],
        llm_engine: Callable = HfApiEngine(),
        system_prompt: str = DEFAULT_REACT_CODE_SYSTEM_PROMPT,
        tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE,
        grammar: Dict[str, str] = None,
        plan_type: Literal[tuple(SUPPORTED_PLAN_TYPES)] = SUPPORTED_PLAN_TYPES[0],
        planning_interval: Optional[int] = None,
        **kwargs,
    ):
        assert plan_type in SUPPORTED_PLAN_TYPES, f"plan type {plan_type} is not supported"
        super().__init__(
            tools=tools,
            llm_engine=llm_engine,
            system_prompt=system_prompt,
            tool_description_template=tool_description_template,
            grammar=grammar,
            **kwargs,
        )
        self.planning_interval = planning_interval
        self.plan_type = plan_type

    def provide_final_answer(self, task) -> str:
        """
        This method provides a final answer to the task, based on the logs of the agent's interactions.
        """
        self.prompt = [
            {
                "role": MessageRole.SYSTEM,
                "content": "An agent tried to answer a user query but it got stuck and failed to do so. You are tasked with providing an answer instead. Here is the agent's memory:",
            }
        ]
        self.prompt += self.write_inner_memory_from_logs()[1:]
        self.prompt += [
            {
                "role": MessageRole.USER,
                "content": f"Based on the above, please provide an answer to the following user request:\n{task}",
            }
        ]
        try:
            return self.llm_engine(self.prompt)
        except Exception as e:
            return f"Error in generating final llm output: {e}."

    def run(self, task: str, stream: bool = False, reset: bool = True, **kwargs):
        """
        Runs the agent for the given task.

        Args:
            task (`str`): The task to perform

        Example:
        ```py
        from transformers.agents import ReactCodeAgent
        agent = ReactCodeAgent(tools=[])
        agent.run("What is the result of 2 power 3.7384?")
        ```
        """
        self.task = task
        if len(kwargs) > 0:
            self.task += f"\nYou have been provided with these initial arguments: {str(kwargs)}."
        self.state = kwargs.copy()
        if reset:
            self.initialize_for_run()
        else:
            self.logs.append({"task": task})
        if stream:
            return self.stream_run(task)
        else:
            return self.direct_run(task)

    def stream_run(self, task: str):
        """
        Runs the agent in streaming mode, yielding steps as they are executed: should be launched only in the `run` method.
        """
        final_answer = None
        iteration = 0
        while final_answer is None and iteration < self.max_iterations:
            try:
                step_logs = self.step()
                if "final_answer" in step_logs:
                    final_answer = step_logs["final_answer"]
            except AgentError as e:
                self.logger.error(e, exc_info=1)
                self.logs[-1]["error"] = e
            finally:
                iteration += 1
            yield self.logs[-1]

        if final_answer is None and iteration == self.max_iterations:
            error_message = "Reached max iterations."
            final_step_log = {"error": AgentMaxIterationsError(error_message)}
            self.logs.append(final_step_log)
            self.logger.error(error_message, exc_info=1)
            final_answer = self.provide_final_answer(task)
            final_step_log["final_answer"] = final_answer
            yield final_step_log

        yield final_answer

    def direct_run(self, task: str):
        """
        Runs the agent in direct mode, returning outputs only at the end: should be launched only in the `run` method.
""" final_answer = None iteration = 0 while final_answer is None and iteration < self.max_iterations: try: if self.planning_interval is not None and iteration % self.planning_interval == 0: self.planning_step(task, is_first_step=(iteration == 0), iteration=iteration) step_logs = self.step() if "final_answer" in step_logs: final_answer = step_logs["final_answer"] except AgentError as e: self.logger.error(e, exc_info=1) self.logs[-1]["error"] = e finally: iteration += 1 if final_answer is None and iteration == self.max_iterations: error_message = "Reached max iterations." final_step_log = {"error": AgentMaxIterationsError(error_message)} self.logs.append(final_step_log) self.logger.error(error_message, exc_info=1) final_answer = self.provide_final_answer(task) final_step_log["final_answer"] = final_answer return final_answer def planning_step(self, task, is_first_step: bool = False, iteration: int = None): """ Used periodically by the agent to plan the next steps to reach the objective. Args: task (`str`): The task to perform is_first_step (`bool`): If this step is not the first one, the plan should be an update over a previous plan. iteration (`int`): The number of the current step, used as an indication for the LLM. """ if is_first_step: message_prompt_facts = {"role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS} message_prompt_task = { "role": MessageRole.USER, "content": f"""Here is the task: ``` {task} ``` Now begin!""", } answer_facts = self.llm_engine([message_prompt_facts, message_prompt_task]) message_system_prompt_plan = { "role": MessageRole.SYSTEM, "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["system"], } message_user_prompt_plan = { "role": MessageRole.USER, "content": PROMPTS_FOR_INITIAL_PLAN[self.plan_type]["user"].format( task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), answer_facts=answer_facts, ), } answer_plan = self.llm_engine( [message_system_prompt_plan, message_user_prompt_plan], stop_sequences=["<end_plan>"] ) final_plan_redaction = f"""Here is the plan of action that I will follow to solve the task: ``` {answer_plan} ```""" final_facts_redaction = f"""Here are the facts that I know so far: ``` {answer_facts} ```""".strip() self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction}) self.logger.debug("===== Initial plan: =====") self.logger.debug(final_plan_redaction) else: # update plan agent_memory = self.write_inner_memory_from_logs( summary_mode=False ) # This will not log the plan but will log facts # Redact updated facts facts_update_system_prompt = { "role": MessageRole.SYSTEM, "content": SYSTEM_PROMPT_FACTS_UPDATE, } facts_update_message = { "role": MessageRole.USER, "content": USER_PROMPT_FACTS_UPDATE, } facts_update = self.llm_engine([facts_update_system_prompt] + agent_memory + [facts_update_message]) # Redact updated plan plan_update_message = { "role": MessageRole.SYSTEM, "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["system"].format(task=task), } plan_update_message_user = { "role": MessageRole.USER, "content": PROMPTS_FOR_PLAN_UPDATE[self.plan_type]["user"].format( task=task, tool_descriptions=self._toolbox.show_tool_descriptions(self.tool_description_template), facts_update=facts_update, remaining_steps=(self.max_iterations - iteration), ), } plan_update = self.llm_engine( [plan_update_message] + agent_memory + [plan_update_message_user], stop_sequences=["<end_plan>"] ) # Log final facts and plan final_plan_redaction = 
PLAN_UPDATE_FINAL_PLAN_REDACTION.format(task=task, plan_update=plan_update) final_facts_redaction = f"""Here is the updated list of the facts that I know: ``` {facts_update} ```""" self.logs.append({"plan": final_plan_redaction, "facts": final_facts_redaction}) self.logger.debug("===== Updated plan: =====") self.logger.debug(final_plan_redaction) class ReactJsonAgent(ReactAgent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The tool calls will be formulated by the LLM in JSON format, then parsed and executed. """ def __init__( self, tools: List[Tool], llm_engine: Callable = HfApiEngine(), system_prompt: str = DEFAULT_REACT_JSON_SYSTEM_PROMPT, tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str] = None, planning_interval: Optional[int] = None, **kwargs, ): super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs, ) def step(self): """ Perform one step in the ReAct framework: the agent thinks, acts, and observes the result. The errors are raised here, they are caught and logged in the run() method. """ agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory self.logger.debug("===== New step =====") # Add new step in logs current_step_logs = {} self.logs.append(current_step_logs) current_step_logs["agent_memory"] = agent_memory.copy() self.logger.info("===== Calling LLM with this last message: =====") self.logger.info(self.prompt[-1]) try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine( self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args ) except Exception as e: raise AgentGenerationError(f"Error in generating llm output: {e}.") self.logger.debug("===== Output message of the LLM: =====") self.logger.debug(llm_output) current_step_logs["llm_output"] = llm_output # Parse self.logger.debug("===== Extracting action =====") rationale, action = self.extract_action(llm_output=llm_output, split_token="Action:") try: tool_name, arguments = self.tool_parser(action) except Exception as e: raise AgentParsingError(f"Could not parse the given action: {e}.") current_step_logs["rationale"] = rationale current_step_logs["tool_call"] = {"tool_name": tool_name, "tool_arguments": arguments} # Execute self.logger.warning(f"Calling tool: '{tool_name}' with arguments: {arguments}") if tool_name == "final_answer": if isinstance(arguments, dict): if "answer" in arguments: answer = arguments["answer"] if ( isinstance(answer, str) and answer in self.state.keys() ): # if the answer is a state variable, return the value answer = self.state[answer] else: answer = arguments else: answer = arguments current_step_logs["final_answer"] = answer return current_step_logs else: observation = self.execute_tool_call(tool_name, arguments) observation_type = type(observation) if observation_type == AgentText: updated_information = str(observation).strip() else: # TODO: observation naming could allow for different names of same type if observation_type == AgentImage: observation_name = "image.png" elif observation_type == AgentAudio: observation_name = "audio.mp3" else: observation_name = "object.object" self.state[observation_name] = observation updated_information = f"Stored '{observation_name}' in memory." 
self.logger.info(updated_information) current_step_logs["observation"] = updated_information return current_step_logs class ReactCodeAgent(ReactAgent): """ This agent that solves the given task step by step, using the ReAct framework: While the objective is not reached, the agent will perform a cycle of thinking and acting. The tool calls will be formulated by the LLM in code format, then parsed and executed. """ def __init__( self, tools: List[Tool], llm_engine: Callable = HfApiEngine(), system_prompt: str = DEFAULT_REACT_CODE_SYSTEM_PROMPT, tool_description_template: str = DEFAULT_TOOL_DESCRIPTION_TEMPLATE, grammar: Dict[str, str] = None, additional_authorized_imports: Optional[List[str]] = None, planning_interval: Optional[int] = None, **kwargs, ): super().__init__( tools=tools, llm_engine=llm_engine, system_prompt=system_prompt, tool_description_template=tool_description_template, grammar=grammar, planning_interval=planning_interval, **kwargs, ) if not is_pygments_available(): transformers_logging.warning_once( logger, "pygments isn't installed. Installing pygments will enable color syntax highlighting in the " "ReactCodeAgent.", ) self.python_evaluator = evaluate_python_code self.additional_authorized_imports = additional_authorized_imports if additional_authorized_imports else [] self.authorized_imports = list(set(LIST_SAFE_MODULES) | set(self.additional_authorized_imports)) self.system_prompt = self.system_prompt.replace("<<authorized_imports>>", str(self.authorized_imports)) self.custom_tools = {} def step(self): """ Perform one step in the ReAct framework: the agent thinks, acts, and observes the result. The errors are raised here, they are caught and logged in the run() method. """ agent_memory = self.write_inner_memory_from_logs() self.prompt = agent_memory.copy() self.logger.debug("===== New step =====") # Add new step in logs current_step_logs = {} self.logs.append(current_step_logs) current_step_logs["agent_memory"] = agent_memory.copy() self.logger.info("===== Calling LLM with these last messages: =====") self.logger.info(self.prompt[-2:]) try: additional_args = {"grammar": self.grammar} if self.grammar is not None else {} llm_output = self.llm_engine( self.prompt, stop_sequences=["<end_action>", "Observation:"], **additional_args ) except Exception as e: raise AgentGenerationError(f"Error in generating llm output: {e}.") self.logger.debug("===== Output message of the LLM: =====") self.logger.debug(llm_output) current_step_logs["llm_output"] = llm_output # Parse self.logger.debug("===== Extracting action =====") try: rationale, raw_code_action = self.extract_action(llm_output=llm_output, split_token="Code:") except Exception as e: self.logger.debug(f"Error in extracting action, trying to parse the whole output. Error trace: {e}") rationale, raw_code_action = llm_output, llm_output try: code_action = parse_code_blob(raw_code_action) except Exception as e: error_msg = f"Error in code parsing: {e}. 
Make sure to provide correct code"
            raise AgentParsingError(error_msg)

        current_step_logs["rationale"] = rationale
        current_step_logs["tool_call"] = {"tool_name": "code interpreter", "tool_arguments": code_action}

        # Execute
        self.log_code_action(code_action)
        try:
            result = self.python_evaluator(
                code_action,
                static_tools={
                    **BASE_PYTHON_TOOLS.copy(),
                    **self.toolbox.tools,
                },
                custom_tools=self.custom_tools,
                state=self.state,
                authorized_imports=self.authorized_imports,
            )
            information = self.state["print_outputs"]
            self.logger.warning("Print outputs:")
            self.logger.log(32, information)
            current_step_logs["observation"] = information
        except Exception as e:
            error_msg = f"Code execution failed due to the following error:\n{str(e)}"
            if "'dict' object has no attribute 'read'" in str(e):
                error_msg += "\nYou get this error because you passed a dict as input for one of the arguments instead of a string."
            raise AgentExecutionError(error_msg)
        for line in code_action.split("\n"):
            if line.startswith("final_answer"):
                self.logger.warning(">>> Final answer:")
                self.logger.log(32, result)
                current_step_logs["final_answer"] = result
        return current_step_logs
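To make the parsing contracts in this module concrete, here is an illustrative, hedged use of two of the module-level helpers. The blob strings are invented stand-ins for LLM output, and the import path assumes the module is installed as `transformers.agents.agents`:

```python
from transformers.agents.agents import parse_code_blob, parse_json_tool_call

fence = "`" * 3  # build the markdown fence programmatically to keep this example readable
code_blob = f"Thoughts: compute the answer\nCode:\n{fence}py\nresult = 2 ** 3.7384\n{fence}<end_action>"
print(parse_code_blob(code_blob))
# result = 2 ** 3.7384

tool_blob = '{"action": "image_generator", "action_input": {"prompt": "a cat"}}'
print(parse_json_tool_call(tool_blob))
# ('image_generator', {'prompt': 'a cat'})
```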
transformers/src/transformers/agents/agents.py/0
{ "file_path": "transformers/src/transformers/agents/agents.py", "repo_id": "transformers", "token_count": 19324 }
334
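One more hedged sketch for the agents module above: the `Toolbox` container enforces unique tool names, so `add_tool`, `update_tool`, and `remove_tool` behave as shown below. `FinalAnswerTool` is the same class the module imports from `.default_tools`; its registered name is `final_answer`.

```python
from transformers.agents.agents import Toolbox
from transformers.agents.default_tools import FinalAnswerTool

toolbox = Toolbox(tools=[FinalAnswerTool()])
print(list(toolbox.tools))               # ['final_answer']

toolbox.update_tool(FinalAnswerTool())   # replaces the entry with the same name
toolbox.remove_tool("final_answer")      # would raise KeyError if the name were unknown
print(toolbox)                           # "Toolbox contents:" with no tools listed
```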
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field
from typing import Tuple

from ..utils import (
    cached_property,
    is_torch_available,
    is_torch_xla_available,
    is_torch_xpu_available,
    logging,
    requires_backends,
)
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_xla_available():
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                # report the value we just set; the positive arg is not in kwargs here
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={getattr(self, positive_arg)}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_xla_available():
            device = xm.xla_device()
            n_gpu = 0
        elif is_torch_xpu_available():
            device = torch.device("xpu")
            n_gpu = torch.xpu.device_count()
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_xla_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
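A hedged sketch of how the arguments class above resolves devices in practice. The field names (`models`, `batch_sizes`, `sequence_lengths`, `cuda`) come from the parent `BenchmarkArguments`, and torch must be installed:

```python
from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["google-bert/bert-base-uncased"],
    batch_sizes=[1],
    sequence_lengths=[8],
    cuda=False,  # force CPU: _setup_devices then resolves to (cpu, 0)
)
print(args.device)  # device(type='cpu')
print(args.n_gpu)   # 0
```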
transformers/src/transformers/benchmark/benchmark_args.py/0
{ "file_path": "transformers/src/transformers/benchmark/benchmark_args.py", "repo_id": "transformers", "token_count": 1758 }
335
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def main(): parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]") commands_parser = parser.add_subparsers(help="transformers-cli command helpers") # Register commands ConvertCommand.register_subcommand(commands_parser) DownloadCommand.register_subcommand(commands_parser) EnvironmentCommand.register_subcommand(commands_parser) RunCommand.register_subcommand(commands_parser) ServeCommand.register_subcommand(commands_parser) UserCommands.register_subcommand(commands_parser) AddNewModelLikeCommand.register_subcommand(commands_parser) LfsCommands.register_subcommand(commands_parser) PTtoTFCommand.register_subcommand(commands_parser) # Let's go args = parser.parse_args() if not hasattr(args, "func"): parser.print_help() exit(1) # Run service = args.func(args) service.run() if __name__ == "__main__": main()
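For illustration (a hedged sketch, not part of the module): the entry point above can be driven programmatically by faking `sys.argv`, which mirrors running `transformers-cli env` from a shell:

```python
import sys

from transformers.commands.transformers_cli import main

sys.argv = ["transformers-cli", "env"]
main()  # EnvironmentCommand prints platform and library versions for bug reports
```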
transformers/src/transformers/commands/transformers_cli.py/0
{ "file_path": "transformers/src/transformers/commands/transformers_cli.py", "repo_id": "transformers", "token_count": 593 }
336
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
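A hedged sketch of the re-exported GLUE helpers above (this legacy `data` API may emit deprecation warnings on newer versions):

```python
from transformers.data.processors import glue_output_modes, glue_processors

print(sorted(glue_processors))     # ['cola', 'mnli', 'mnli-mm', 'mrpc', 'qnli', ...]
processor = glue_processors["mrpc"]()
print(processor.get_labels())      # ['0', '1']
print(glue_output_modes["mrpc"])   # 'classification'
```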
transformers/src/transformers/data/processors/__init__.py/0
{ "file_path": "transformers/src/transformers/data/processors/__init__.py", "repo_id": "transformers", "token_count": 287 }
337
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple import torch from ..cache_utils import DynamicCache from ..pytorch_utils import isin_mps_friendly from .logits_process import LogitsProcessorList, MinLengthLogitsProcessor if TYPE_CHECKING: from ..modeling_utils import PreTrainedModel from .configuration_utils import GenerationConfig class CandidateGenerator: """Abstract base class for all candidate generators that can be applied during assisted generation.""" def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: """ Fetches the candidates to be tried for the current input. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) Return: `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length, vocabulary_size)` containing the logits associated to each candidate. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`." ) def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): """ Updates the candidate generation strategy based on the outcomes. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search num_matches (`int`): The number of matches between the candidate sequences and the model predictions. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can call " "`update_candidate_strategy`." ) class AssistedCandidateGenerator(CandidateGenerator): """ `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates candidates through the use of a smaller model. Read the following blog post for more information: https://huggingface.co/blog/assisted-generation Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) assistant_model (`PreTrainedModel`): The model to be used for generating candidates. This model should be smaller than the main model. generation_config (`~generation.GenerationConfig`, *optional*): The generation configuration to be used as base parametrization for the generation call. 
logits_processor (`LogitsProcessorList`): An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] used to modify the prediction scores of the language modeling head applied at each generation step. model_kwargs (`Dict`): The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant model as well. inputs_tensor (`torch.Tensor`, *optional*): The model input tensor. In encoder-decoder models, this is the encoder input. """ def __init__( self, input_ids: torch.LongTensor, assistant_model: "PreTrainedModel", generation_config: "GenerationConfig", model_kwargs: Dict, inputs_tensor: Optional[torch.Tensor] = None, logits_processor: "LogitsProcessorList" = None, ): # Make sure all data at the same device as assistant model device = assistant_model.device input_ids = input_ids.to(device) if inputs_tensor is not None: inputs_tensor = inputs_tensor.to(device) # Prepare the assistant and the starting number of candidate tokens self.assistant_model = assistant_model self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens # Set eos in assistant same as in target model self.assistant_model.generation_config.eos_token_id = generation_config.eos_token_id # Prepare the kwargs for the assistant model assistant_kwargs = {} for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads if key not in ("encoder_outputs", "assistant_encoder_outputs", "past_key_values"): assistant_kwargs[key] = ( value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value) ) # Remove potential default "num_logits_to_keep" key if "num_logits_to_keep" in assistant_kwargs.keys() and not assistant_model._supports_num_logits_to_keep(): del assistant_kwargs["num_logits_to_keep"] if "assistant_encoder_outputs" in model_kwargs: assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] elif assistant_model.config.is_encoder_decoder: inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs( inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs ) assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation( inputs_tensor, assistant_kwargs, model_input_name, assistant_model.generation_config ) elif "encoder_outputs" in model_kwargs: assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"] self.assistant_kwargs = assistant_kwargs # Prepare assistant model's keys of inputs if assistant_model.config.is_encoder_decoder: # both are encoder-decoder self.input_ids_key = "decoder_input_ids" elif "encoder_outputs" in assistant_kwargs: # special case for encoder-decoder with decoder-only assistant (like DistilWhisper) self.input_ids_key = "input_ids" self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get( "decoder_attention_mask", torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long), ) else: # both are decoder-only self.input_ids_key = "input_ids" # Prepare generation-related options. self.logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() self.generation_config = copy.deepcopy(generation_config) self.generation_config.return_dict_in_generate = True self.generation_config.output_scores = True # Disable sampling -- this implementation of assisted generation/speculative decoding uses the assistant # greedily to maximize matches. 
Disables sampling-related flags to prevent warnings self.generation_config.do_sample = False for attr in ("temperature", "top_p", "min_p", "typical_p", "top_k", "epsilon_cutoff", "eta_cutoff"): setattr(self.generation_config, attr, None) # avoid unnecessary warnings that min_length is larger than max_new_tokens # remove the `MinLengthLogitsProcessor` if exists (NOTE: no need to check for `MinNewTokensLogitsProcessor`) self.main_model_min_length = self.generation_config.min_length self.generation_config.min_length = 0 self.generation_config.min_new_tokens = None for processor in self.logits_processor: if isinstance(processor, MinLengthLogitsProcessor): raise ValueError( "Passing `MinLengthLogitsProcessor` when using `assisted_generation is disabled. " "Please pass in `min_length` into `.generate()` instead" ) # We need to roll back the cache in assisted generation, only DynamicCache is supported self.generation_config.cache_implementation = None def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: """ Fetches the candidates to be tried for the current input. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) Return: `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length, vocabulary_size)` containing the logits associated to each candidate. """ input_ids = input_ids.to(self.assistant_model.device) # Don't generate more than `max_length - 1` candidates since the target model generates one extra token. new_cur_len = input_ids.shape[-1] max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1) min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0) if max_new_tokens == 0: return input_ids, None # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length # (which implicitly contains the number of accepted candidates from the previous round) has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None if has_past_key_values: new_cache_size = new_cur_len - 1 self.assistant_kwargs["past_key_values"] = _crop_past_key_values( self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1 ) # the assistant does not have the token after the last match, hence the -1 self.assistant_kwargs = _prepare_attention_mask( self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder ) self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len) # 2. Forecast next N tokens using the assistant model. assistant_generation_kwargs = { self.input_ids_key: input_ids, "min_new_tokens": min_new_tokens, "max_new_tokens": max_new_tokens, "generation_config": self.generation_config, "logits_processor": self.logits_processor, } assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs) # 3. Update variables for the next round of candidate generation self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values # 4. 
Prepare variables for output candidate_logits = torch.stack(assistant_output.scores, dim=1) candidate_ids = assistant_output.sequences return candidate_ids, candidate_logits def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): """ Updates the candidate generation strategy based on the outcomes. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam search or log softmax for each vocabulary token when using beam search num_matches (`int`): The number of matches between the candidate sequences and the model predictions. """ # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic, # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the # cost of forecasting incorrect assistant tokens. if self.assistant_model.generation_config.num_assistant_tokens_schedule in { "heuristic", "heuristic_transient", }: if num_matches == int(self.num_assistant_tokens): self.num_assistant_tokens += 2.0 else: self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0) class PromptLookupCandidateGenerator(CandidateGenerator): """ `CandidateGenerator` class to be used for prompt lookup generation. This class generates candidates by looking up likely continuations in the provided prompt (input_ids) itself. Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding Args: max_matching_ngram_size (`int`): The maximum ngram size to be considered for matching in the prompt num_output_tokens (`int`): The number of tokens to be output as candidate tokens. max_length (`int`): The number of total maximum tokens that can be generated. For decoder-only models that includes the prompt length. Defaults to 20, which is the max length used as default in generation config. """ def __init__( self, eos_token_id: torch.Tensor = None, num_output_tokens: int = 10, max_matching_ngram_size: int = None, max_length: int = 20, ): self.num_output_tokens = num_output_tokens self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2 self.max_length = max_length self.eos_token_id = eos_token_id if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0: raise ValueError("Invalid max_matching_ngram_size or num_output_tokens") def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: """ Fetches the candidates to be tried for the current input. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) Return: `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried. """ input_length = input_ids.size(1) # Don't generate more than `max_length - 1` candidates since the target model generates one extra token. 
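        # Illustrative walk-through (assumed toy values, not part of the original file): with
        # input_ids = [[1, 2, 3, 4, 1, 2]] and max_matching_ngram_size=2, the trailing bigram
        # (1, 2) also occurs at position 0, so the tokens that followed it -- [3, 4, 1, 2] --
        # are proposed as candidates (capped by num_output_tokens and the max_length budget).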
        if self.max_length == input_length + 1:
            return input_ids, None

        chosen_ids = None
        match_found = False
        for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):
            # Create sliding windows of size ngram_size
            windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)

            # Convert ngram to a tensor for comparison
            ngram_tensor = input_ids[0, -ngram_size:]

            # Find where the windows match the ngram
            matches = (windows == ngram_tensor).all(dim=2)

            # Get the indices of matches
            match_indices = matches.nonzero(as_tuple=True)[1]

            # Iterate through match indices to find a valid continuation
            for idx in match_indices:
                start_idx = idx + ngram_size
                end_idx = start_idx + self.num_output_tokens
                end_idx = min(end_idx, input_length, self.max_length)

                if start_idx < end_idx:
                    chosen_ids = input_ids[0, start_idx:end_idx]
                    match_found = True

                    # Remove remaining candidate ids if an "eos" token is found; otherwise the target model may
                    # accept eos and the rest as valid, thus not stopping generation after "eos".
                    # NOTE: the code below relies on the fact that assisted decoding supports only batch size 1.
                    mask = isin_mps_friendly(chosen_ids, self.eos_token_id)
                    match_indices_eos = torch.nonzero(mask)
                    if match_indices_eos.numel() > 0:
                        first_eos_index = match_indices_eos[0].item()
                        chosen_ids = chosen_ids[:first_eos_index]
                    break
            if match_found:
                break

        if chosen_ids is None or len(chosen_ids) == 0:
            # In case we didn't find a match, return the input sequence unchanged; this reverts to autoregressive
            # decoding.
            return input_ids, None

        # Now we need to extend input_ids with chosen_ids
        chosen_ids = chosen_ids.unsqueeze(0)
        candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)
        # Assisted generation expects logits as well, but we don't have those here, so returning None.
        return candidate_input_ids, None

    def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
        """
        Updates the candidate generation strategy based on the outcomes.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
            scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
                Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
                using beam search, or log softmax for each vocabulary token when using beam search.
            num_matches (`int`):
                The number of matches between the candidate sequences and the model predictions.
""" # Currently does nothing return def _crop_past_key_values(model, past_key_values, max_length): """Crops the past key values up to a certain maximum length.""" new_past = [] if model.config.is_encoder_decoder: for idx in range(len(past_key_values)): new_past.append( ( past_key_values[idx][0][:, :, :max_length, :], past_key_values[idx][1][:, :, :max_length, :], past_key_values[idx][2], past_key_values[idx][3], ) ) past_key_values = tuple(new_past) # gptbigcode is special and stores kv in shape (batch_size, seq_len, dim), if it's a multi_query model elif "gptbigcode" in model.__class__.__name__.lower() or ( model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower() ): if model.config.multi_query: for idx in range(len(past_key_values)): past_key_values[idx] = past_key_values[idx][:, :max_length, :] else: for idx in range(len(past_key_values)): past_key_values[idx] = past_key_values[idx][:, :, :max_length, :] elif isinstance(past_key_values, DynamicCache): past_key_values.crop(max_length) elif past_key_values is not None: for idx in range(len(past_key_values)): new_past.append( ( past_key_values[idx][0][:, :, :max_length, :], past_key_values[idx][1][:, :, :max_length, :], ) ) past_key_values = tuple(new_past) return past_key_values def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]: """Expands or crops the model's mask for decoding purposes, to the defined length""" mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask" if mask_key not in model_kwargs: return model_kwargs mask = model_kwargs[mask_key] mask_length_diff = new_length - mask.shape[1] if mask_length_diff < 0: model_kwargs[mask_key] = mask[:, :mask_length_diff] elif mask_length_diff > 0: model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1) return model_kwargs def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]: """Expands or crops the model's token_type_ids for decoding purposes, to the defined length""" if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None: return model_kwargs token_type_ids = model_kwargs["token_type_ids"] final_token_type = token_type_ids[:, -1].unsqueeze(-1) type_length_diff = new_length - token_type_ids.shape[1] if type_length_diff < 0: token_type_ids = token_type_ids[:, :type_length_diff] elif type_length_diff > 0: token_type_copies = final_token_type.repeat(1, type_length_diff) model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1) return model_kwargs
transformers/src/transformers/generation/candidate_generator.py/0
{ "file_path": "transformers/src/transformers/generation/candidate_generator.py", "repo_id": "transformers", "token_count": 9061 }
338
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from math import ceil from typing import Iterable, List, Optional, Tuple, Union import numpy as np from .image_utils import ( ChannelDimension, ImageInput, get_channel_dimension_axis, get_image_size, infer_channel_dimension_format, ) from .utils import ExplicitEnum, TensorType, is_jax_tensor, is_tf_tensor, is_torch_tensor from .utils.import_utils import ( is_flax_available, is_tf_available, is_torch_available, is_torchvision_available, is_vision_available, requires_backends, ) if is_vision_available(): import PIL from .image_utils import PILImageResampling if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_flax_available(): import jax.numpy as jnp if is_torchvision_available(): from torchvision.transforms import functional as F def to_channel_dimension_format( image: np.ndarray, channel_dim: Union[ChannelDimension, str], input_channel_dim: Optional[Union[ChannelDimension, str]] = None, ) -> np.ndarray: """ Converts `image` to the channel dimension format specified by `channel_dim`. Args: image (`numpy.ndarray`): The image to have its channel dimension set. channel_dim (`ChannelDimension`): The channel dimension format to use. input_channel_dim (`ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. Returns: `np.ndarray`: The image with the channel dimension set to `channel_dim`. """ if not isinstance(image, np.ndarray): raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}") if input_channel_dim is None: input_channel_dim = infer_channel_dimension_format(image) target_channel_dim = ChannelDimension(channel_dim) if input_channel_dim == target_channel_dim: return image if target_channel_dim == ChannelDimension.FIRST: image = image.transpose((2, 0, 1)) elif target_channel_dim == ChannelDimension.LAST: image = image.transpose((1, 2, 0)) else: raise ValueError("Unsupported channel dimension format: {}".format(channel_dim)) return image def rescale( image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, dtype: np.dtype = np.float32, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Rescales `image` by `scale`. Args: image (`np.ndarray`): The image to rescale. scale (`float`): The scale to use for rescaling the image. data_format (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. dtype (`np.dtype`, *optional*, defaults to `np.float32`): The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature extractors. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If not provided, it will be inferred from the input image. Returns: `np.ndarray`: The rescaled image. 
""" if not isinstance(image, np.ndarray): raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}") rescaled_image = image * scale if data_format is not None: rescaled_image = to_channel_dimension_format(rescaled_image, data_format, input_data_format) rescaled_image = rescaled_image.astype(dtype) return rescaled_image def _rescale_for_pil_conversion(image): """ Detects whether or not the image needs to be rescaled before being converted to a PIL image. The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be rescaled. """ if image.dtype == np.uint8: do_rescale = False elif np.allclose(image, image.astype(int)): if np.all(0 <= image) and np.all(image <= 255): do_rescale = False else: raise ValueError( "The image to be converted to a PIL image contains values outside the range [0, 255], " f"got [{image.min()}, {image.max()}] which cannot be converted to uint8." ) elif np.all(0 <= image) and np.all(image <= 1): do_rescale = True else: raise ValueError( "The image to be converted to a PIL image contains values outside the range [0, 1], " f"got [{image.min()}, {image.max()}] which cannot be converted to uint8." ) return do_rescale def to_pil_image( image: Union[np.ndarray, "PIL.Image.Image", "torch.Tensor", "tf.Tensor", "jnp.ndarray"], do_rescale: Optional[bool] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> "PIL.Image.Image": """ Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if needed. Args: image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`): The image to convert to the `PIL.Image` format. do_rescale (`bool`, *optional*): Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default to `True` if the image type is a floating type and casting to `int` would result in a loss of precision, and `False` otherwise. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `PIL.Image.Image`: The converted image. """ requires_backends(to_pil_image, ["vision"]) if isinstance(image, PIL.Image.Image): return image # Convert all tensors to numpy arrays before converting to PIL image if is_torch_tensor(image) or is_tf_tensor(image): image = image.numpy() elif is_jax_tensor(image): image = np.array(image) elif not isinstance(image, np.ndarray): raise ValueError("Input image type not supported: {}".format(type(image))) # If the channel has been moved to first dim, we put it back at the end. image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) # If there is a single channel, we squeeze it, as otherwise PIL can't handle it. image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed. 
do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale if do_rescale: image = rescale(image, 255) image = image.astype(np.uint8) return PIL.Image.fromarray(image) # Logic adapted from torchvision resizing logic: https://github.com/pytorch/vision/blob/511924c1ced4ce0461197e5caa64ce5b9e558aab/torchvision/transforms/functional.py#L366 def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], default_to_square: bool = True, max_size: Optional[int] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> tuple: """ Find the target (height, width) dimension of the output image after resizing given the input image and the desired size. Args: input_image (`np.ndarray`): The image to resize. size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`): The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to this. If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this number. i.e, if height > width, then image will be rescaled to (size * height / width, size). default_to_square (`bool`, *optional*, defaults to `True`): How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square (`size`,`size`). If set to `False`, will replicate [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize) with support for resizing only the smallest edge and providing an optional `max_size`. max_size (`int`, *optional*): The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater than `max_size` after being resized according to `size`, then the image is resized again so that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter than `size`. Only used if `default_to_square` is `False`. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `tuple`: The target (height, width) dimension of the output image after resizing. 
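    Example (illustrative): for an input of size (height, width) = (480, 640) with `size=256` and
    `default_to_square=False`, the shorter edge is matched to 256 and the output is (256, 341).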
""" if isinstance(size, (tuple, list)): if len(size) == 2: return tuple(size) elif len(size) == 1: # Perform same logic as if size was an int size = size[0] else: raise ValueError("size must have 1 or 2 elements if it is a list or tuple") if default_to_square: return (size, size) height, width = get_image_size(input_image, input_data_format) short, long = (width, height) if width <= height else (height, width) requested_new_short = size new_short, new_long = requested_new_short, int(requested_new_short * long / short) if max_size is not None: if max_size <= requested_new_short: raise ValueError( f"max_size = {max_size} must be strictly greater than the requested " f"size for the smaller edge size = {size}" ) if new_long > max_size: new_short, new_long = int(max_size * new_short / new_long), max_size return (new_long, new_short) if width <= height else (new_short, new_long) def resize( image: np.ndarray, size: Tuple[int, int], resample: "PILImageResampling" = None, reducing_gap: Optional[int] = None, data_format: Optional[ChannelDimension] = None, return_numpy: bool = True, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Resizes `image` to `(height, width)` specified by `size` using the PIL library. Args: image (`np.ndarray`): The image to resize. size (`Tuple[int, int]`): The size to use for resizing the image. resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`): The filter to user for resampling. reducing_gap (`int`, *optional*): Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to the fair resampling. See corresponding Pillow documentation for more details. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If unset, will use the inferred format from the input. return_numpy (`bool`, *optional*, defaults to `True`): Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is returned. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. Returns: `np.ndarray`: The resized image. """ requires_backends(resize, ["vision"]) resample = resample if resample is not None else PILImageResampling.BILINEAR if not len(size) == 2: raise ValueError("size must have 2 elements") # For all transformations, we want to keep the same data format as the input image unless otherwise specified. # The resized image from PIL will always have channels last, so find the input format first. if input_data_format is None: input_data_format = infer_channel_dimension_format(image) data_format = input_data_format if data_format is None else data_format # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use # the pillow library to resize the image and then convert back to numpy do_rescale = False if not isinstance(image, PIL.Image.Image): do_rescale = _rescale_for_pil_conversion(image) image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format) height, width = size # PIL images are in the format (width, height) resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap) if return_numpy: resized_image = np.array(resized_image) # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image # so we need to add it back if necessary. 
resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image # The image is always in channels last format after converting from a PIL image resized_image = to_channel_dimension_format( resized_image, data_format, input_channel_dim=ChannelDimension.LAST ) # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to # rescale it back to the original range. resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image return resized_image def normalize( image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Normalizes `image` using the mean and standard deviation specified by `mean` and `std`. image = (image - mean) / std Args: image (`np.ndarray`): The image to normalize. mean (`float` or `Iterable[float]`): The mean to use for normalization. std (`float` or `Iterable[float]`): The standard deviation to use for normalization. data_format (`ChannelDimension`, *optional*): The channel dimension format of the output image. If unset, will use the inferred format from the input. input_data_format (`ChannelDimension`, *optional*): The channel dimension format of the input image. If unset, will use the inferred format from the input. """ if not isinstance(image, np.ndarray): raise ValueError("image must be a numpy array") if input_data_format is None: input_data_format = infer_channel_dimension_format(image) channel_axis = get_channel_dimension_axis(image, input_data_format=input_data_format) num_channels = image.shape[channel_axis] # We cast to float32 to avoid errors that can occur when subtracting uint8 values. # We preserve the original dtype if it is a float type to prevent upcasting float16. if not np.issubdtype(image.dtype, np.floating): image = image.astype(np.float32) if isinstance(mean, Iterable): if len(mean) != num_channels: raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}") else: mean = [mean] * num_channels mean = np.array(mean, dtype=image.dtype) if isinstance(std, Iterable): if len(std) != num_channels: raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}") else: std = [std] * num_channels std = np.array(std, dtype=image.dtype) if input_data_format == ChannelDimension.LAST: image = (image - mean) / std else: image = ((image.T - mean) / std).T image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image def center_crop( image: np.ndarray, size: Tuple[int, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, return_numpy: Optional[bool] = None, ) -> np.ndarray: """ Crops the `image` to the specified `size` using a center crop. Note that if the image is too small to be cropped to the size given, it will be padded (so the returned result will always be of size `size`). Args: image (`np.ndarray`): The image to crop. size (`Tuple[int, int]`): The target size for the cropped image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. 
If unset, will use the inferred format of the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. return_numpy (`bool`, *optional*): Whether or not to return the cropped image as a numpy array. Used for backwards compatibility with the previous ImageFeatureExtractionMixin method. - Unset: will return the same type as the input image. - `True`: will return a numpy array. - `False`: will return a `PIL.Image.Image` object. Returns: `np.ndarray`: The cropped image. """ requires_backends(center_crop, ["vision"]) if return_numpy is not None: warnings.warn("return_numpy is deprecated and will be removed in v.4.33", FutureWarning) return_numpy = True if return_numpy is None else return_numpy if not isinstance(image, np.ndarray): raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}") if not isinstance(size, Iterable) or len(size) != 2: raise ValueError("size must have 2 elements representing the height and width of the output image") if input_data_format is None: input_data_format = infer_channel_dimension_format(image) output_data_format = data_format if data_format is not None else input_data_format # We perform the crop in (C, H, W) format and then convert to the output format image = to_channel_dimension_format(image, ChannelDimension.FIRST, input_data_format) orig_height, orig_width = get_image_size(image, ChannelDimension.FIRST) crop_height, crop_width = size crop_height, crop_width = int(crop_height), int(crop_width) # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result. top = (orig_height - crop_height) // 2 bottom = top + crop_height # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result. left = (orig_width - crop_width) // 2 right = left + crop_width # Check if cropped area is within image boundaries if top >= 0 and bottom <= orig_height and left >= 0 and right <= orig_width: image = image[..., top:bottom, left:right] image = to_channel_dimension_format(image, output_data_format, ChannelDimension.FIRST) return image # Otherwise, we may need to pad if the image is too small. Oh joy... 
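    # Illustrative note (assumed shapes, not in the original file): cropping a (3, 28, 28) image
    # to size (32, 32) gives top = left = -2, so the crop window extends past the image; the code
    # below zero-pads to (3, 32, 32) and shifts the window so the output is always `size`.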
new_height = max(crop_height, orig_height) new_width = max(crop_width, orig_width) new_shape = image.shape[:-2] + (new_height, new_width) new_image = np.zeros_like(image, shape=new_shape) # If the image is too small, pad it with zeros top_pad = ceil((new_height - orig_height) / 2) bottom_pad = top_pad + orig_height left_pad = ceil((new_width - orig_width) / 2) right_pad = left_pad + orig_width new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image top += top_pad bottom += top_pad left += left_pad right += left_pad new_image = new_image[..., max(0, top) : min(new_height, bottom), max(0, left) : min(new_width, right)] new_image = to_channel_dimension_format(new_image, output_data_format, ChannelDimension.FIRST) if not return_numpy: new_image = to_pil_image(new_image) return new_image def _center_to_corners_format_torch(bboxes_center: "torch.Tensor") -> "torch.Tensor": center_x, center_y, width, height = bboxes_center.unbind(-1) bbox_corners = torch.stack( # top left x, top left y, bottom right x, bottom right y [(center_x - 0.5 * width), (center_y - 0.5 * height), (center_x + 0.5 * width), (center_y + 0.5 * height)], dim=-1, ) return bbox_corners def _center_to_corners_format_numpy(bboxes_center: np.ndarray) -> np.ndarray: center_x, center_y, width, height = bboxes_center.T bboxes_corners = np.stack( # top left x, top left y, bottom right x, bottom right y [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], axis=-1, ) return bboxes_corners def _center_to_corners_format_tf(bboxes_center: "tf.Tensor") -> "tf.Tensor": center_x, center_y, width, height = tf.unstack(bboxes_center, axis=-1) bboxes_corners = tf.stack( # top left x, top left y, bottom right x, bottom right y [center_x - 0.5 * width, center_y - 0.5 * height, center_x + 0.5 * width, center_y + 0.5 * height], axis=-1, ) return bboxes_corners # 2 functions below inspired by https://github.com/facebookresearch/detr/blob/master/util/box_ops.py def center_to_corners_format(bboxes_center: TensorType) -> TensorType: """ Converts bounding boxes from center format to corners format. 
    center format: contains the coordinate for the center of the box and its width, height dimensions
    (center_x, center_y, width, height)
    corners format: contains the coordinates for the top-left and bottom-right corners of the box
    (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
    """
    # Function is used during model forward pass, so we use the input framework if possible, without
    # converting to numpy
    if is_torch_tensor(bboxes_center):
        return _center_to_corners_format_torch(bboxes_center)
    elif isinstance(bboxes_center, np.ndarray):
        return _center_to_corners_format_numpy(bboxes_center)
    elif is_tf_tensor(bboxes_center):
        return _center_to_corners_format_tf(bboxes_center)

    raise ValueError(f"Unsupported input type {type(bboxes_center)}")


def _corners_to_center_format_torch(bboxes_corners: "torch.Tensor") -> "torch.Tensor":
    top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.unbind(-1)
    b = [
        (top_left_x + bottom_right_x) / 2,  # center x
        (top_left_y + bottom_right_y) / 2,  # center y
        (bottom_right_x - top_left_x),  # width
        (bottom_right_y - top_left_y),  # height
    ]
    return torch.stack(b, dim=-1)


def _corners_to_center_format_numpy(bboxes_corners: np.ndarray) -> np.ndarray:
    top_left_x, top_left_y, bottom_right_x, bottom_right_y = bboxes_corners.T
    bboxes_center = np.stack(
        [
            (top_left_x + bottom_right_x) / 2,  # center x
            (top_left_y + bottom_right_y) / 2,  # center y
            (bottom_right_x - top_left_x),  # width
            (bottom_right_y - top_left_y),  # height
        ],
        axis=-1,
    )
    return bboxes_center


def _corners_to_center_format_tf(bboxes_corners: "tf.Tensor") -> "tf.Tensor":
    top_left_x, top_left_y, bottom_right_x, bottom_right_y = tf.unstack(bboxes_corners, axis=-1)
    bboxes_center = tf.stack(
        [
            (top_left_x + bottom_right_x) / 2,  # center x
            (top_left_y + bottom_right_y) / 2,  # center y
            (bottom_right_x - top_left_x),  # width
            (bottom_right_y - top_left_y),  # height
        ],
        axis=-1,
    )
    return bboxes_center


def corners_to_center_format(bboxes_corners: TensorType) -> TensorType:
    """
    Converts bounding boxes from corners format to center format.

    corners format: contains the coordinates for the top-left and bottom-right corners of the box
    (top_left_x, top_left_y, bottom_right_x, bottom_right_y)
    center format: contains the coordinate for the center of the box and its width, height dimensions
    (center_x, center_y, width, height)
    """
    # Inverse function accepts different input types so implemented here too
    if is_torch_tensor(bboxes_corners):
        return _corners_to_center_format_torch(bboxes_corners)
    elif isinstance(bboxes_corners, np.ndarray):
        return _corners_to_center_format_numpy(bboxes_corners)
    elif is_tf_tensor(bboxes_corners):
        return _corners_to_center_format_tf(bboxes_corners)

    raise ValueError(f"Unsupported input type {type(bboxes_corners)}")


# 2 functions below copied from https://github.com/cocodataset/panopticapi/blob/master/panopticapi/utils.py
# Copyright (c) 2018, Alexander Kirillov
# All rights reserved.
def rgb_to_id(color):
    """
    Converts RGB color to unique ID.
    """
    if isinstance(color, np.ndarray) and len(color.shape) == 3:
        if color.dtype == np.uint8:
            color = color.astype(np.int32)
        return color[:, :, 0] + 256 * color[:, :, 1] + 256 * 256 * color[:, :, 2]
    return int(color[0] + 256 * color[1] + 256 * 256 * color[2])


def id_to_rgb(id_map):
    """
    Converts unique ID to RGB color.
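    Example (illustrative): `id_to_rgb(66051)` returns `[3, 2, 1]`, since
    `3 + 256 * 2 + 256**2 * 1 == 66051`; it is the inverse of `rgb_to_id` above.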
""" if isinstance(id_map, np.ndarray): id_map_copy = id_map.copy() rgb_shape = tuple(list(id_map.shape) + [3]) rgb_map = np.zeros(rgb_shape, dtype=np.uint8) for i in range(3): rgb_map[..., i] = id_map_copy % 256 id_map_copy //= 256 return rgb_map color = [] for _ in range(3): color.append(id_map % 256) id_map //= 256 return color class PaddingMode(ExplicitEnum): """ Enum class for the different padding modes to use when padding images. """ CONSTANT = "constant" REFLECT = "reflect" REPLICATE = "replicate" SYMMETRIC = "symmetric" def pad( image: np.ndarray, padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]], mode: PaddingMode = PaddingMode.CONSTANT, constant_values: Union[float, Iterable[float]] = 0.0, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Pads the `image` with the specified (height, width) `padding` and `mode`. Args: image (`np.ndarray`): The image to pad. padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`): Padding to apply to the edges of the height, width axes. Can be one of three formats: - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis. - `((before, after),)` yields same before and after pad for height and width. - `(pad,)` or int is a shortcut for before = after = pad width for all axes. mode (`PaddingMode`): The padding mode to use. Can be one of: - `"constant"`: pads with a constant value. - `"reflect"`: pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. - `"replicate"`: pads with the replication of the last value on the edge of the array along each axis. - `"symmetric"`: pads with the reflection of the vector mirrored along the edge of the array. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. Returns: `np.ndarray`: The padded image. """ if input_data_format is None: input_data_format = infer_channel_dimension_format(image) def _expand_for_data_format(values): """ Convert values to be in the format expected by np.pad based on the data format. 
""" if isinstance(values, (int, float)): values = ((values, values), (values, values)) elif isinstance(values, tuple) and len(values) == 1: values = ((values[0], values[0]), (values[0], values[0])) elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int): values = (values, values) elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple): values = values else: raise ValueError(f"Unsupported format: {values}") # add 0 for channel dimension values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0)) # Add additional padding if there's a batch dimension values = (0, *values) if image.ndim == 4 else values return values padding = _expand_for_data_format(padding) if mode == PaddingMode.CONSTANT: constant_values = _expand_for_data_format(constant_values) image = np.pad(image, padding, mode="constant", constant_values=constant_values) elif mode == PaddingMode.REFLECT: image = np.pad(image, padding, mode="reflect") elif mode == PaddingMode.REPLICATE: image = np.pad(image, padding, mode="edge") elif mode == PaddingMode.SYMMETRIC: image = np.pad(image, padding, mode="symmetric") else: raise ValueError(f"Invalid padding mode: {mode}") image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image return image # TODO (Amy): Accept 1/3/4 channel numpy array as input and return np.array as default def convert_to_rgb(image: ImageInput) -> ImageInput: """ Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image as is. Args: image (Image): The image to convert. """ requires_backends(convert_to_rgb, ["vision"]) if not isinstance(image, PIL.Image.Image): return image if image.mode == "RGB": return image image = image.convert("RGB") return image def flip_channel_order( image: np.ndarray, data_format: Optional[ChannelDimension] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """ Flips the channel order of the image. If the image is in RGB format, it will be converted to BGR and vice versa. Args: image (`np.ndarray`): The image to flip. data_format (`ChannelDimension`, *optional*): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use same as the input image. input_data_format (`ChannelDimension`, *optional*): The channel dimension format for the input image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. If unset, will use the inferred format of the input image. """ input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format if input_data_format == ChannelDimension.LAST: image = image[..., ::-1] elif input_data_format == ChannelDimension.FIRST: image = image[::-1, ...] else: raise ValueError(f"Unsupported channel dimension: {input_data_format}") if data_format is not None: image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) return image def _cast_tensor_to_float(x): if x.is_floating_point(): return x return x.float() class FusedRescaleNormalize: """ Rescale and normalize the input image in one step. 
""" def __init__(self, mean, std, rescale_factor: float = 1.0, inplace: bool = False): self.mean = torch.tensor(mean) * (1.0 / rescale_factor) self.std = torch.tensor(std) * (1.0 / rescale_factor) self.inplace = inplace def __call__(self, image: "torch.Tensor"): image = _cast_tensor_to_float(image) return F.normalize(image, self.mean, self.std, inplace=self.inplace) class Rescale: """ Rescale the input image by rescale factor: image *= rescale_factor. """ def __init__(self, rescale_factor: float = 1.0): self.rescale_factor = rescale_factor def __call__(self, image: "torch.Tensor"): image = image * self.rescale_factor return image class NumpyToTensor: """ Convert a numpy array to a PyTorch tensor. """ def __call__(self, image: np.ndarray): # Same as in PyTorch, we assume incoming numpy images are in HWC format # c.f. https://github.com/pytorch/vision/blob/61d97f41bc209e1407dcfbd685d2ee2da9c1cdad/torchvision/transforms/functional.py#L154 return torch.from_numpy(image.transpose(2, 0, 1)).contiguous()
transformers/src/transformers/image_transforms.py/0
{ "file_path": "transformers/src/transformers/image_transforms.py", "repo_id": "transformers", "token_count": 13885 }
339
#include "cuda_kernel.h" ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void index_max_cuda_kernel( float *index_vals, // [batch_size, 32, num_block] int *indices, // [batch_size, num_block] float *max_vals, // [batch_size, A_num_block * 32] float *max_vals_scatter, // [batch_size, 32, num_block] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.x; long thread_idx = threadIdx.x; long num_thread = blockDim.x; extern __shared__ float buffer[]; int *max_buffer = (int*)buffer; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_buffer[idx] = -1e8; } } __syncthreads(); int *indices_pt = &indices[batch_idx * num_block]; float *index_vals_pt = &index_vals[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; atomicMax(&max_buffer[A_block_idx * 32 + idx / num_block], (int)(index_vals_pt[idx] * 1000)); } __syncthreads(); float *max_vals_pt = &max_vals[batch_idx * A_num_block * 32]; for (int i = 0; i < A_num_block * 32; i = i + num_thread) { int idx = i + thread_idx; if (idx < A_num_block * 32) { max_vals_pt[idx] = (float)max_buffer[idx] / 1000.; } } float *max_vals_scatter_pt = &max_vals_scatter[batch_idx * num_block * 32]; for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { int idx = idx_start + thread_idx; int A_block_idx = indices_pt[idx % num_block] / B_num_block; max_vals_scatter_pt[idx] = (float)max_buffer[A_block_idx * 32 + idx / num_block] / 1000.; } } __global__ void mm_to_sparse_cuda_kernel( float *dense_A, // [batch_size, A_num_block, dim, 32] float *dense_B, // [batch_size, B_num_block, dim, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[4096]; float *A_buffer = &buffer[threadIdx.y * 1024]; // [2, 8, 32] float *B_buffer = &buffer[threadIdx.y * 1024 + 512]; // [2, 8, 32] long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * dim * 32]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * dim * 32]; int reg_1_idx = thread_idx / 8; // [0000000011111111222222223333333344444444555555556666666677777777] int reg_2_idx = thread_idx % 8; // [0123456701234567012345670123456701234567012345670123456701234567] float reg_1[8]; float reg_2[8]; float reg_array[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[i * 64 + thread_idx] = dense_A_pt[i * 64 + thread_idx]; B_buffer[i * 64 + thread_idx] = dense_B_pt[i * 64 + thread_idx]; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[reg_1_idx * 4 + i]; reg_2[i] = B_buffer[reg_2_idx * 4 + i]; } for (int dim_stride = 1; dim_stride < (dim / 8); dim_stride++) { #pragma unroll for (int i = 0; i < 4; i++) { A_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = 
dense_A_pt[dim_stride * 256 + i * 64 + thread_idx]; B_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_B_pt[dim_stride * 256 + i * 64 + thread_idx]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = A_buffer[(dim_stride % 2) * 256 + reg_1_idx * 4 + i]; reg_2[i] = B_buffer[(dim_stride % 2) * 256 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 1024]; // [32, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_2_idx * 4 + j) * 32 + reg_1_idx * 4 + i] = reg_array[i * 4 + j]; } } __syncthreads(); float *sparse_C_pt = &sparse_C[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 16; i++) { sparse_C_pt[i * 64 + thread_idx] = C_buffer[i * 64 + thread_idx]; } } __global__ void sparse_dense_mm_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_B, // [batch_size, B_num_block, dim, 32] float *dense_C, // [batch_size, A_num_block, dim, 32] long batch_size, long A_num_block, long B_num_block, long dim, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; __shared__ float buffer[6144]; float *A_buffer = &buffer[threadIdx.y * 3072]; // [32, 32] float *B_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [32, 64] long batch_idx__block_idx = batch_idx * num_block + block_idx; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; #pragma unroll for (int i = 0; i < 8; i++) { A_buffer[i * 128 + thread_idx] = sparse_A_pt[i * 128 + thread_idx]; } long AB_block_idx = indices[batch_idx__block_idx]; float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * 32 * dim]; float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32 * dim]; // [0000000011111111222222223333333344444444555555556666666677777777] // [0123456701234567012345670123456701234567012345670123456701234567] int reg_1_idx = thread_idx / 8; int reg_2_idx = thread_idx % 8; float reg_1[8]; float reg_2[8]; float reg_array[16]; for (int dim_stride = 0; dim_stride < dim; 
dim_stride = dim_stride + 64) { #pragma unroll for (int i = 0; i < 16; i++) { B_buffer[i * 128 + thread_idx] = dense_B_pt[dim_stride * 32 + i * 128 + thread_idx]; } #pragma unroll for (int i = 0; i < 16; i++) { reg_array[i] = 0; } __syncthreads(); #pragma unroll for (int i = 0; i < 4; i++) { reg_1[i] = B_buffer[(reg_1_idx * 4 + i) * 32]; reg_2[i] = A_buffer[reg_2_idx * 4 + i]; } #pragma unroll for (int mini_dim_idx = 1; mini_dim_idx < 32; mini_dim_idx++) { #pragma unroll for (int i = 0; i < 4; i++) { reg_1[(mini_dim_idx % 2) * 4 + i] = B_buffer[(reg_1_idx * 4 + i) * 32 + mini_dim_idx]; reg_2[(mini_dim_idx % 2) * 4 + i] = A_buffer[mini_dim_idx * 32 + reg_2_idx * 4 + i]; } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; } } } #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; } } __syncthreads(); float *C_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [64, 32] #pragma unroll for (int i = 0; i < 4; i++) { #pragma unroll for (int j = 0; j < 4; j++) { C_buffer[(reg_1_idx * 4 + i) * 32 + reg_2_idx * 4 + j] = reg_array[i * 4 + j]; } } __syncthreads(); #pragma unroll for (int i = 0; i < 16; i++) { atomicAdd(&dense_C_pt[dim_stride * 32 + i * 128 + thread_idx], C_buffer[i * 128 + thread_idx]); } __syncthreads(); } } __global__ void reduce_sum_cuda_kernel( float *sparse_A, // [batch_size, num_block, 32, 32] int *indices, // [batch_size, num_block] float *dense_C, // [batch_size, A_num_block, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; float reg_array[16]; float value = 0; #pragma unroll for (int i = 0; i < 8; i++) { reg_array[i] = sparse_A_pt[i * 32 + thread_idx]; } #pragma unroll for (int stride = 8; stride < 32; stride = stride + 8) { #pragma unroll for (int i = 0; i < 8; i++) { reg_array[(stride + i) % 16] = sparse_A_pt[(stride + i) * 32 + thread_idx]; } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[(stride - 8 + i) % 16]; } } #pragma unroll for (int i = 0; i < 8; i++) { value = value + reg_array[8 + i]; } float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; atomicAdd(&dense_C_pt[thread_idx], value); } __global__ void scatter_cuda_kernel( float *dense_A, // [batch_size, A_num_block, 32] int *indices, // [batch_size, num_block] float *sparse_C, // [batch_size, num_block, 32, 32] long batch_size, long A_num_block, long B_num_block, long num_block ) { long batch_idx = blockIdx.y; long block_idx = blockIdx.x * blockDim.y + threadIdx.y; long thread_idx = threadIdx.x; long batch_idx__block_idx = batch_idx * num_block + block_idx; long AB_block_idx = indices[batch_idx__block_idx]; float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; float *sparse_C_pt = &sparse_C[(batch_idx * num_block + block_idx) * 1024]; float value = dense_A_pt[thread_idx]; #pragma unroll for (int i = 0; i < 32; i++) { sparse_C_pt[i * 32 + thread_idx] = value; } }
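# A NumPy reference sketch (illustrative, not part of the kernel file above) of what
# `mm_to_sparse_cuda_kernel` computes for one batch element: each entry of `indices`
# selects an (A-block, B-block) pair, and the kernel forms a 32x32 block product by
# contracting over `dim`. The output orientation here is an assumption -- the kernel's
# shared-memory buffer layout fixes the exact transpose convention.
import numpy as np

def mm_to_sparse_ref(dense_A, dense_B, indices, B_num_block):
    # dense_A: [A_num_block, dim, 32], dense_B: [B_num_block, dim, 32]
    sparse_C = np.zeros((len(indices), 32, 32), dtype=np.float32)
    for b, idx in enumerate(indices):
        a_blk = dense_A[idx // B_num_block]  # [dim, 32]
        b_blk = dense_B[idx % B_num_block]   # [dim, 32]
        sparse_C[b] = a_blk.T @ b_blk        # contract over dim -> [32, 32]
    return sparse_C

# e.g. with A_num_block=2, B_num_block=3, indices=[0, 4] selects block pairs (0, 0) and (1, 1).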
transformers/src/transformers/kernels/mra/cuda_kernel.cu/0
{ "file_path": "transformers/src/transformers/kernels/mra/cuda_kernel.cu", "repo_id": "transformers", "token_count": 5563 }
340
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Configuration base class and utilities.""" import copy import json import os import warnings from dataclasses import dataclass from pathlib import Path from typing import Any, Dict, List, Optional, Union import requests import yaml from huggingface_hub import model_info from huggingface_hub.utils import HFValidationError from . import __version__ from .models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, ) from .training_args import ParallelMode from .utils import ( MODEL_CARD_NAME, cached_file, is_datasets_available, is_offline_mode, is_tf_available, is_tokenizers_available, is_torch_available, logging, ) TASK_MAPPING = { "text-generation": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "image-segmentation": MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, "fill-mask": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "object-detection": MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "text2text-generation": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "text-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "table-question-answering": MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "automatic-speech-recognition": {**MODEL_FOR_CTC_MAPPING_NAMES, **MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES}, "zero-shot-image-classification": MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES, } logger = logging.get_logger(__name__) class ModelCard: r""" Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards. Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993 Note: A model card can be loaded and saved to disk. 
""" def __init__(self, **kwargs): warnings.warn( "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning ) # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers) self.model_details = kwargs.pop("model_details", {}) self.intended_use = kwargs.pop("intended_use", {}) self.factors = kwargs.pop("factors", {}) self.metrics = kwargs.pop("metrics", {}) self.evaluation_data = kwargs.pop("evaluation_data", {}) self.training_data = kwargs.pop("training_data", {}) self.quantitative_analyses = kwargs.pop("quantitative_analyses", {}) self.ethical_considerations = kwargs.pop("ethical_considerations", {}) self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {}) # Open additional attributes for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def save_pretrained(self, save_directory_or_file): """Save a model card object to the directory or file `save_directory_or_file`.""" if os.path.isdir(save_directory_or_file): # If we save using the predefined names, we can load using `from_pretrained` output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME) else: output_model_card_file = save_directory_or_file self.to_json_file(output_model_card_file) logger.info(f"Model card saved in {output_model_card_file}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a [`ModelCard`] from a pre-trained model model card. Parameters: pretrained_model_name_or_path: either: - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co. - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`] method, e.g.: `./my_model_directory/`. - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`. cache_dir: (*optional*) string: Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache should not be used. kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading. - The values in kwargs of any keys which are model card attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the *return_unused_kwargs* keyword parameter. proxies: (*optional*) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (*optional*) bool: - If False, then this function returns just the final model card object. - If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of kwargs which has not been used to update *ModelCard* and is otherwise ignored. Examples: ```python # Download model card from huggingface.co and cache. 
modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased") # Model card was saved using *save_pretrained('./test/saved_model/')* modelcard = ModelCard.from_pretrained("./test/saved_model/") modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") modelcard = ModelCard.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False) ```""" cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) from_pipeline = kwargs.pop("_from_pipeline", None) user_agent = {"file_type": "model_card"} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): resolved_model_card_file = pretrained_model_name_or_path is_local = True else: try: # Load from URL or cache if already cached resolved_model_card_file = cached_file( pretrained_model_name_or_path, filename=MODEL_CARD_NAME, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent, ) if is_local: logger.info(f"loading model card file {resolved_model_card_file}") else: logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}") # Load model card modelcard = cls.from_json_file(resolved_model_card_file) except (EnvironmentError, json.JSONDecodeError): # We fall back on creating an empty model card modelcard = cls() # Update model card with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(modelcard, key): setattr(modelcard, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(f"Model card: {modelcard}") if return_unused_kwargs: return modelcard, kwargs else: return modelcard @classmethod def from_dict(cls, json_object): """Constructs a `ModelCard` from a Python dictionary of parameters.""" return cls(**json_object) @classmethod def from_json_file(cls, json_file): """Constructs a `ModelCard` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() dict_obj = json.loads(text) return cls(**dict_obj) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """Save this instance to a json file.""" with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) AUTOGENERATED_TRAINER_COMMENT = """ <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> """ AUTOGENERATED_KERAS_COMMENT = """ <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. 
--> """ TASK_TAG_TO_NAME_MAPPING = { "fill-mask": "Masked Language Modeling", "image-classification": "Image Classification", "image-segmentation": "Image Segmentation", "multiple-choice": "Multiple Choice", "object-detection": "Object Detection", "question-answering": "Question Answering", "summarization": "Summarization", "table-question-answering": "Table Question Answering", "text-classification": "Text Classification", "text-generation": "Causal Language Modeling", "text2text-generation": "Sequence-to-sequence Language Modeling", "token-classification": "Token Classification", "translation": "Translation", "zero-shot-classification": "Zero Shot Classification", "automatic-speech-recognition": "Automatic Speech Recognition", "audio-classification": "Audio Classification", } METRIC_TAGS = [ "accuracy", "bleu", "f1", "matthews_correlation", "pearsonr", "precision", "recall", "rouge", "sacrebleu", "spearmanr", "wer", ] def _listify(obj): if obj is None: return [] elif isinstance(obj, str): return [obj] else: return obj def _insert_values_as_list(metadata, name, values): if values is None: return metadata if isinstance(values, str): values = [values] values = [v for v in values if v is not None] if len(values) == 0: return metadata metadata[name] = values return metadata def infer_metric_tags_from_eval_results(eval_results): if eval_results is None: return {} result = {} for key in eval_results.keys(): if key.lower().replace(" ", "_") in METRIC_TAGS: result[key.lower().replace(" ", "_")] = key elif key.lower() == "rouge1": result["rouge"] = key return result def _insert_value(metadata, name, value): if value is None: return metadata metadata[name] = value return metadata def is_hf_dataset(dataset): if not is_datasets_available(): return False from datasets import Dataset, IterableDataset return isinstance(dataset, (Dataset, IterableDataset)) def _get_mapping_values(mapping): result = [] for v in mapping.values(): if isinstance(v, (tuple, list)): result += list(v) else: result.append(v) return result @dataclass class TrainingSummary: model_name: str language: Optional[Union[str, List[str]]] = None license: Optional[str] = None tags: Optional[Union[str, List[str]]] = None finetuned_from: Optional[str] = None tasks: Optional[Union[str, List[str]]] = None dataset: Optional[Union[str, List[str]]] = None dataset_tags: Optional[Union[str, List[str]]] = None dataset_args: Optional[Union[str, List[str]]] = None dataset_metadata: Optional[Dict[str, Any]] = None eval_results: Optional[Dict[str, float]] = None eval_lines: Optional[List[str]] = None hyperparameters: Optional[Dict[str, Any]] = None source: Optional[str] = "trainer" def __post_init__(self): # Infer default license from the checkpoint used, if possible. 
if ( self.license is None and not is_offline_mode() and self.finetuned_from is not None and len(self.finetuned_from) > 0 ): try: info = model_info(self.finetuned_from) for tag in info.tags: if tag.startswith("license:"): self.license = tag[8:] except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError, HFValidationError): pass def create_model_index(self, metric_mapping): model_index = {"name": self.model_name} # Dataset mapping tag -> name dataset_names = _listify(self.dataset) dataset_tags = _listify(self.dataset_tags) dataset_args = _listify(self.dataset_args) dataset_metadata = _listify(self.dataset_metadata) if len(dataset_args) < len(dataset_tags): dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) dataset_mapping = dict(zip(dataset_tags, dataset_names)) dataset_arg_mapping = dict(zip(dataset_tags, dataset_args)) dataset_metadata_mapping = dict(zip(dataset_tags, dataset_metadata)) task_mapping = { task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING } model_index["results"] = [] if len(task_mapping) == 0 and len(dataset_mapping) == 0: return [model_index] if len(task_mapping) == 0: task_mapping = {None: None} if len(dataset_mapping) == 0: dataset_mapping = {None: None} # One entry per dataset and per task all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] for task_tag, ds_tag in all_possibilities: result = {} if task_tag is not None: result["task"] = {"name": task_mapping[task_tag], "type": task_tag} if ds_tag is not None: metadata = dataset_metadata_mapping.get(ds_tag, {}) result["dataset"] = { "name": dataset_mapping[ds_tag], "type": ds_tag, **metadata, } if dataset_arg_mapping[ds_tag] is not None: result["dataset"]["args"] = dataset_arg_mapping[ds_tag] if len(metric_mapping) > 0: result["metrics"] = [] for metric_tag, metric_name in metric_mapping.items(): result["metrics"].append( { "name": metric_name, "type": metric_tag, "value": self.eval_results[metric_name], } ) # Remove partial results to avoid the model card being rejected. if "task" in result and "dataset" in result and "metrics" in result: model_index["results"].append(result) else: logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}") return [model_index] def create_metadata(self): metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) metadata = {} metadata = _insert_value(metadata, "library_name", "transformers") metadata = _insert_values_as_list(metadata, "language", self.language) metadata = _insert_value(metadata, "license", self.license) if self.finetuned_from is not None and isinstance(self.finetuned_from, str) and len(self.finetuned_from) > 0: metadata = _insert_value(metadata, "base_model", self.finetuned_from) metadata = _insert_values_as_list(metadata, "tags", self.tags) metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags) metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys())) metadata["model-index"] = self.create_model_index(metric_mapping) return metadata def to_model_card(self): model_card = "" metadata = yaml.dump(self.create_metadata(), sort_keys=False) if len(metadata) > 0: model_card = f"---\n{metadata}---\n" # Now the model card for realsies. 
if self.source == "trainer": model_card += AUTOGENERATED_TRAINER_COMMENT else: model_card += AUTOGENERATED_KERAS_COMMENT model_card += f"\n# {self.model_name}\n\n" if self.finetuned_from is None: model_card += "This model was trained from scratch on " else: model_card += ( "This model is a fine-tuned version of" f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on " ) if self.dataset is None: model_card += "an unknown dataset." else: if isinstance(self.dataset, str): model_card += f"the {self.dataset} dataset." elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1: model_card += f"the {self.dataset[0]} dataset." else: model_card += ( ", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets." ) if self.eval_results is not None: model_card += "\nIt achieves the following results on the evaluation set:\n" model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()]) model_card += "\n" model_card += "\n## Model description\n\nMore information needed\n" model_card += "\n## Intended uses & limitations\n\nMore information needed\n" model_card += "\n## Training and evaluation data\n\nMore information needed\n" model_card += "\n## Training procedure\n" model_card += "\n### Training hyperparameters\n" if self.hyperparameters is not None: model_card += "\nThe following hyperparameters were used during training:\n" model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()]) model_card += "\n" else: model_card += "\nMore information needed\n" if self.eval_lines is not None: model_card += "\n### Training results\n\n" model_card += make_markdown_table(self.eval_lines) model_card += "\n" model_card += "\n### Framework versions\n\n" model_card += f"- Transformers {__version__}\n" if self.source == "trainer" and is_torch_available(): import torch model_card += f"- Pytorch {torch.__version__}\n" elif self.source == "keras" and is_tf_available(): import tensorflow as tf model_card += f"- TensorFlow {tf.__version__}\n" if is_datasets_available(): import datasets model_card += f"- Datasets {datasets.__version__}\n" if is_tokenizers_available(): import tokenizers model_card += f"- Tokenizers {tokenizers.__version__}\n" return model_card @classmethod def from_trainer( cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None, ): # Infer default from dataset one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None): default_tag = one_dataset.builder_name # Those are not real datasets from the Hub so we exclude them. 
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: if dataset_metadata is None: dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}] if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [one_dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags # Infer default finetuned_from if ( finetuned_from is None and hasattr(trainer.model.config, "_name_or_path") and not os.path.isdir(trainer.model.config._name_or_path) ): finetuned_from = trainer.model.config._name_or_path # Infer default task tag: if tasks is None: model_class_name = trainer.model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task if model_name is None: model_name = Path(trainer.args.output_dir).name if len(model_name) == 0: model_name = finetuned_from # Add `generated_from_trainer` to the tags if tags is None: tags = ["generated_from_trainer"] elif isinstance(tags, str) and tags != "generated_from_trainer": tags = [tags, "generated_from_trainer"] elif "generated_from_trainer" not in tags: tags.append("generated_from_trainer") _, eval_lines, eval_results = parse_log_history(trainer.state.log_history) hyperparameters = extract_hyperparameters_from_trainer(trainer) return cls( language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset=dataset, dataset_tags=dataset_tags, dataset_args=dataset_args, dataset_metadata=dataset_metadata, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, ) @classmethod def from_keras( cls, model, model_name, keras_history=None, language=None, license=None, tags=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None, ): # Infer default from dataset if dataset is not None: if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None): default_tag = dataset.builder_name # Those are not real datasets from the Hub so we exclude them. 
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags # Infer default finetuned_from if ( finetuned_from is None and hasattr(model.config, "_name_or_path") and not os.path.isdir(model.config._name_or_path) ): finetuned_from = model.config._name_or_path # Infer default task tag: if tasks is None: model_class_name = model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task # Add `generated_from_keras_callback` to the tags if tags is None: tags = ["generated_from_keras_callback"] elif isinstance(tags, str) and tags != "generated_from_keras_callback": tags = [tags, "generated_from_keras_callback"] elif "generated_from_keras_callback" not in tags: tags.append("generated_from_keras_callback") if keras_history is not None: _, eval_lines, eval_results = parse_keras_history(keras_history) else: eval_lines = [] eval_results = {} hyperparameters = extract_hyperparameters_from_keras(model) return cls( language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, source="keras", ) def parse_keras_history(logs): """ Parse the `logs` of either a `keras.History` object returned by `model.fit()` or an accumulated logs `dict` passed to the `PushToHubCallback`. Returns lines and logs compatible with those returned by `parse_log_history`. """ if hasattr(logs, "history"): # This looks like a `History` object if not hasattr(logs, "epoch"): # This history looks empty, return empty results return None, [], {} logs.history["epoch"] = logs.epoch logs = logs.history else: # Training logs is a list of dicts, let's invert it to a dict of lists to match a History object logs = {log_key: [single_dict[log_key] for single_dict in logs] for log_key in logs[0]} lines = [] for i in range(len(logs["epoch"])): epoch_dict = {log_key: log_value_list[i] for log_key, log_value_list in logs.items()} values = {} for k, v in epoch_dict.items(): if k.startswith("val_"): k = "validation_" + k[4:] elif k != "epoch": k = "train_" + k splits = k.split("_") name = " ".join([part.capitalize() for part in splits]) values[name] = v lines.append(values) eval_results = lines[-1] return logs, lines, eval_results def parse_log_history(log_history): """ Parse the `log_history` of a Trainer to get the intermediate and final evaluation results. 
""" idx = 0 while idx < len(log_history) and "train_runtime" not in log_history[idx]: idx += 1 # If there are no training logs if idx == len(log_history): idx -= 1 while idx >= 0 and "eval_loss" not in log_history[idx]: idx -= 1 if idx >= 0: return None, None, log_history[idx] else: return None, None, None # From now one we can assume we have training logs: train_log = log_history[idx] lines = [] training_loss = "No log" for i in range(idx): if "loss" in log_history[i]: training_loss = log_history[i]["loss"] if "eval_loss" in log_history[i]: metrics = log_history[i].copy() _ = metrics.pop("total_flos", None) epoch = metrics.pop("epoch", None) step = metrics.pop("step", None) _ = metrics.pop("eval_runtime", None) _ = metrics.pop("eval_samples_per_second", None) _ = metrics.pop("eval_steps_per_second", None) _ = metrics.pop("eval_jit_compilation_time", None) values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step} for k, v in metrics.items(): if k == "eval_loss": values["Validation Loss"] = v else: splits = k.split("_") name = " ".join([part.capitalize() for part in splits[1:]]) values[name] = v lines.append(values) idx = len(log_history) - 1 while idx >= 0 and "eval_loss" not in log_history[idx]: idx -= 1 if idx > 0: eval_results = {} for key, value in log_history[idx].items(): if key.startswith("eval_"): key = key[5:] if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]: camel_cased_key = " ".join([part.capitalize() for part in key.split("_")]) eval_results[camel_cased_key] = value return train_log, lines, eval_results else: return train_log, lines, None def extract_hyperparameters_from_keras(model): from .modeling_tf_utils import keras hyperparameters = {} if hasattr(model, "optimizer") and model.optimizer is not None: hyperparameters["optimizer"] = model.optimizer.get_config() else: hyperparameters["optimizer"] = None hyperparameters["training_precision"] = keras.mixed_precision.global_policy().name return hyperparameters def _maybe_round(v, decimals=4): if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals: return f"{v:.{decimals}f}" return str(v) def _regular_table_line(values, col_widths): values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)] return "".join(values_with_space) + "|\n" def _second_table_line(col_widths): values = ["|:" + "-" * w + ":" for w in col_widths] return "".join(values) + "|\n" def make_markdown_table(lines): """ Create a nice Markdown table from the results in `lines`. 
""" if lines is None or len(lines) == 0: return "" col_widths = {key: len(str(key)) for key in lines[0].keys()} for line in lines: for key, value in line.items(): if col_widths[key] < len(_maybe_round(value)): col_widths[key] = len(_maybe_round(value)) table = _regular_table_line(list(lines[0].keys()), list(col_widths.values())) table += _second_table_line(list(col_widths.values())) for line in lines: table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values())) return table _TRAINING_ARGS_KEYS = [ "learning_rate", "train_batch_size", "eval_batch_size", "seed", ] def extract_hyperparameters_from_trainer(trainer): hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS} if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]: hyperparameters["distributed_type"] = ( "multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value ) if trainer.args.world_size > 1: hyperparameters["num_devices"] = trainer.args.world_size if trainer.args.gradient_accumulation_steps > 1: hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps total_train_batch_size = ( trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps ) if total_train_batch_size != hyperparameters["train_batch_size"]: hyperparameters["total_train_batch_size"] = total_train_batch_size total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size if total_eval_batch_size != hyperparameters["eval_batch_size"]: hyperparameters["total_eval_batch_size"] = total_eval_batch_size if trainer.args.adafactor: hyperparameters["optimizer"] = "Adafactor" else: hyperparameters["optimizer"] = ( f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and" f" epsilon={trainer.args.adam_epsilon}" ) hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value if trainer.args.warmup_ratio != 0.0: hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio if trainer.args.warmup_steps != 0.0: hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps if trainer.args.max_steps != -1: hyperparameters["training_steps"] = trainer.args.max_steps else: hyperparameters["num_epochs"] = trainer.args.num_train_epochs if trainer.args.fp16: if trainer.use_apex: hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}" else: hyperparameters["mixed_precision_training"] = "Native AMP" if trainer.args.label_smoothing_factor != 0.0: hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor return hyperparameters
transformers/src/transformers/modelcard.py/0
{ "file_path": "transformers/src/transformers/modelcard.py", "repo_id": "transformers", "token_count": 15565 }
341
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ALBERT checkpoint.""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path): # Initialise PyTorch model config = AlbertConfig.from_json_file(albert_config_file) print(f"Building PyTorch model from configuration: {config}") model = AlbertForPreTraining(config) # Load weights from tf checkpoint load_tf_weights_in_albert(model, config, tf_checkpoint_path) # Save pytorch-model print(f"Save PyTorch model to {pytorch_dump_path}") torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--albert_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained ALBERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
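

# Editorial note: an example invocation of this script. The checkpoint paths
# below are hypothetical and must point at a real TensorFlow ALBERT checkpoint,
# its config file, and the desired output location:
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin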
transformers/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 755 }
342
# coding=utf-8
# Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Audio Spectrogram Transformer (AST) model configuration"""

from typing import Any, Dict

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class ASTConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`ASTModel`]. It is used to instantiate an AST
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the AST
    [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
    architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        patch_size (`int`, *optional*, defaults to 16):
            The size (resolution) of each patch.
        qkv_bias (`bool`, *optional*, defaults to `True`):
            Whether to add a bias to the queries, keys and values.
        frequency_stride (`int`, *optional*, defaults to 10):
            Frequency stride to use when patchifying the spectrograms.
        time_stride (`int`, *optional*, defaults to 10):
            Temporal stride to use when patchifying the spectrograms.
        max_length (`int`, *optional*, defaults to 1024):
            Temporal dimension of the spectrograms.
        num_mel_bins (`int`, *optional*, defaults to 128):
            Frequency dimension of the spectrograms (number of Mel-frequency bins).
    Example:

    ```python
    >>> from transformers import ASTConfig, ASTModel

    >>> # Initializing an AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
    >>> configuration = ASTConfig()

    >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
    >>> model = ASTModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins

    # Overwritten from the parent class: AST is not compatible with `generate`, but has a config parameter sharing the
    # same name (`max_length`). Sharing the same name triggers checks regarding the config -> generation_config
    # generative parameters deprecation cycle, overwriting this function prevents this from happening.
    def _get_non_default_generation_parameters(self) -> Dict[str, Any]:
        return {}
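

# --- Editorial sketch (not part of the original module) ---
# How the patchify parameters above translate into a patch grid. This mirrors
# the standard convolution output-size arithmetic; it is a sketch of the
# relationship, not the library's own embedding code.
if __name__ == "__main__":
    config = ASTConfig()
    frequency_patches = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
    time_patches = (config.max_length - config.patch_size) // config.time_stride + 1
    # Defaults: (128 - 16) // 10 + 1 = 12 frequency patches and
    # (1024 - 16) // 10 + 1 = 101 time patches, i.e. 12 * 101 = 1212 patches.
    print(frequency_patches, time_patches, frequency_patches * time_patches)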
transformers/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py/0
{ "file_path": "transformers/src/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py", "repo_id": "transformers", "token_count": 2141 }
343
# coding=utf-8 # Copyright (c) 2021 THUML @ Tsinghua University # Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Autoformer model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _prepare_4d_attention_mask from ...modeling_outputs import ( BaseModelOutput, ModelOutput, SampleTSPredictionOutput, Seq2SeqTSPredictionOutput, ) from ...modeling_utils import PreTrainedModel from ...time_series_utils import NegativeBinomialOutput, NormalOutput, StudentTOutput from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from .configuration_autoformer import AutoformerConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "AutoformerConfig" @dataclass class AutoFormerDecoderOutput(ModelOutput): """ Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Trend tensor for each time series. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None trend: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class AutoformerModelOutput(ModelOutput): """ Autoformer model output that contains the additional trend output. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, hidden_size)` is output. trend (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Trend tensor for each time series. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the optional initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. 
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the optional initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. loc (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Shift values of each time series' context window which is used to give the model inputs of the same magnitude and then used to shift back to the original magnitude. scale (`torch.FloatTensor` of shape `(batch_size,)` or `(batch_size, input_size)`, *optional*): Scaling values of each time series' context window which is used to give the model inputs of the same magnitude and then used to rescale back to the original magnitude. static_features: (`torch.FloatTensor` of shape `(batch_size, feature size)`, *optional*): Static features of each time series' in a batch which are copied to the covariates at inference time. """ last_hidden_state: torch.FloatTensor = None trend: torch.FloatTensor = None past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None loc: Optional[torch.FloatTensor] = None scale: Optional[torch.FloatTensor] = None static_features: Optional[torch.FloatTensor] = None # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesFeatureEmbedder with TimeSeries->Autoformer class AutoformerFeatureEmbedder(nn.Module): """ Embed a sequence of categorical features. Args: cardinalities (`list[int]`): List of cardinalities of the categorical features. embedding_dims (`list[int]`): List of embedding dimensions of the categorical features. 
""" def __init__(self, cardinalities: List[int], embedding_dims: List[int]) -> None: super().__init__() self.num_features = len(cardinalities) self.embedders = nn.ModuleList([nn.Embedding(c, d) for c, d in zip(cardinalities, embedding_dims)]) def forward(self, features: torch.Tensor) -> torch.Tensor: if self.num_features > 1: # we slice the last dimension, giving an array of length # self.num_features with shape (N,T) or (N) cat_feature_slices = torch.chunk(features, self.num_features, dim=-1) else: cat_feature_slices = [features] return torch.cat( [ embed(cat_feature_slice.squeeze(-1)) for embed, cat_feature_slice in zip(self.embedders, cat_feature_slices) ], dim=-1, ) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesStdScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer class AutoformerStdScaler(nn.Module): """ Standardize features by calculating the mean and scaling along the first dimension, and then normalizes it by subtracting from the mean and dividing by the standard deviation. """ def __init__(self, config: AutoformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-5 def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ denominator = observed_indicator.sum(self.dim, keepdim=self.keepdim) denominator = denominator.clamp_min(1.0) loc = (data * observed_indicator).sum(self.dim, keepdim=self.keepdim) / denominator variance = (((data - loc) * observed_indicator) ** 2).sum(self.dim, keepdim=self.keepdim) / denominator scale = torch.sqrt(variance + self.minimum_scale) return (data - loc) / scale, loc, scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesMeanScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer class AutoformerMeanScaler(nn.Module): """ Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data accordingly. """ def __init__(self, config: AutoformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10 self.default_scale = config.default_scale if hasattr(config, "default_scale") else None def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`): Calculating the scale on the observed indicator. 
Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True) num_observed = observed_indicator.sum(self.dim, keepdim=True) scale = ts_sum / torch.clamp(num_observed, min=1) # If `default_scale` is provided, we use it, otherwise we use the scale # of the batch. if self.default_scale is None: batch_sum = ts_sum.sum(dim=0) batch_observations = torch.clamp(num_observed.sum(0), min=1) default_scale = torch.squeeze(batch_sum / batch_observations) else: default_scale = self.default_scale * torch.ones_like(scale) # apply default scale where there are no observations scale = torch.where(num_observed > 0, scale, default_scale) # ensure the scale is at least `self.minimum_scale` scale = torch.clamp(scale, min=self.minimum_scale) scaled_data = data / scale if not self.keepdim: scale = scale.squeeze(dim=self.dim) return scaled_data, torch.zeros_like(scale), scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer class AutoformerNOPScaler(nn.Module): """ Assigns a scaling factor equal to 1 along the first dimension, and therefore applies no scaling to the input data. """ def __init__(self, config: AutoformerConfig): super().__init__() self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1 self.keepdim = config.keepdim if hasattr(config, "keepdim") else True def forward( self, data: torch.Tensor, observed_indicator: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Parameters: data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`): input for Batch norm calculation Returns: tuple of `torch.Tensor` of shapes (`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`, `(batch_size, 1, num_input_channels)`) """ scale = torch.ones_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) loc = torch.zeros_like(data, requires_grad=False).mean(dim=self.dim, keepdim=self.keepdim) return data, loc, scale # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.weighted_average def weighted_average(input_tensor: torch.Tensor, weights: Optional[torch.Tensor] = None, dim=None) -> torch.Tensor: """ Computes the weighted average of a given tensor across a given `dim`, masking values associated with weight zero, meaning instead of `nan * 0 = nan` you will get `0 * 0 = 0`. Args: input_tensor (`torch.FloatTensor`): Input tensor, of which the average must be computed. weights (`torch.FloatTensor`, *optional*): Weights tensor, of the same shape as `input_tensor`. dim (`int`, *optional*): The dim along which to average `input_tensor`. Returns: `torch.FloatTensor`: The tensor with values averaged along the specified `dim`. 
""" if weights is not None: weighted_tensor = torch.where(weights != 0, input_tensor * weights, torch.zeros_like(input_tensor)) sum_weights = torch.clamp(weights.sum(dim=dim) if dim else weights.sum(), min=1.0) return (weighted_tensor.sum(dim=dim) if dim else weighted_tensor.sum()) / sum_weights else: return input_tensor.mean(dim=dim) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.nll def nll(input: torch.distributions.Distribution, target: torch.Tensor) -> torch.Tensor: """ Computes the negative log likelihood loss from input distribution with respect to target. """ return -input.log_prob(target) # Copied from transformers.models.marian.modeling_marian.MarianSinusoidalPositionalEmbedding with Marian->Autoformer class AutoformerSinusoidalPositionalEmbedding(nn.Embedding): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None) -> None: super().__init__(num_positions, embedding_dim) self.weight = self._init_weight(self.weight) @staticmethod def _init_weight(out: nn.Parameter) -> nn.Parameter: """ Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in the 2nd half of the vector. [dim // 2:] """ n_pos, dim = out.shape position_enc = np.array( [[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)] ) out.requires_grad = False # set early to avoid an error in pytorch-1.8+ sentinel = dim // 2 if dim % 2 == 0 else (dim // 2) + 1 out[:, 0:sentinel] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, sentinel:] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() return out @torch.no_grad() def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0) -> torch.Tensor: """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions) # Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesValueEmbedding with TimeSeries->Autoformer class AutoformerValueEmbedding(nn.Module): def __init__(self, feature_size, d_model): super().__init__() self.value_projection = nn.Linear(in_features=feature_size, out_features=d_model, bias=False) def forward(self, x): return self.value_projection(x) # Class based on # https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L39 # where AutoformerSeriesDecompositionLayer is series_decomp + moving_average class AutoformerSeriesDecompositionLayer(nn.Module): """ Returns the trend and the seasonal parts of the time series. 
    Calculated as:

        x_trend = AvgPool(Padding(X)) and x_seasonal = X - x_trend
    """

    def __init__(self, config: AutoformerConfig):
        super().__init__()
        self.kernel_size = config.moving_average
        self.avg = nn.AvgPool1d(kernel_size=self.kernel_size, stride=1, padding=0)

    def forward(self, x):
        """Input shape: Batch x Time x EMBED_DIM"""
        # padding on both ends of the time series
        num_of_pads = (self.kernel_size - 1) // 2
        front = x[:, 0:1, :].repeat(1, num_of_pads, 1)
        end = x[:, -1:, :].repeat(1, num_of_pads, 1)
        x_padded = torch.cat([front, x, end], dim=1)

        # calculate the trend and seasonal part of the series
        x_trend = self.avg(x_padded.permute(0, 2, 1)).permute(0, 2, 1)
        x_seasonal = x - x_trend
        return x_seasonal, x_trend


# Class based on
# https://github.com/thuml/Autoformer/blob/c6a0694ff484753f2d986cc0bb1f99ee850fc1a8/layers/Autoformer_EncDec.py#L6
# where AutoformerLayernorm is my_Layernorm
class AutoformerLayernorm(nn.Module):
    """
    Specially designed layer normalization for the seasonal part, calculated as:
    AutoformerLayernorm(x) = nn.LayerNorm(x) - torch.mean(nn.LayerNorm(x))
    """

    def __init__(self, config: AutoformerConfig):
        super().__init__()
        self.layernorm = nn.LayerNorm(config.d_model)

    def forward(self, x):
        x_hat = self.layernorm(x)
        bias = torch.mean(x_hat, dim=1).unsqueeze(1).repeat(1, x.shape[1], 1)
        return x_hat - bias


class AutoformerAttention(nn.Module):
    """
    AutoCorrelation Mechanism with the following two phases:
        (1) period-based dependencies discovery
        (2) time delay aggregation
    This block replaces the canonical self-attention mechanism.
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        autocorrelation_factor: int = 3,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.autocorrelation_factor = autocorrelation_factor def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) # (1) period-based dependencies discovery # Resize (truncation or zero filling) queries_time_length = query_states.size(1) values_time_length = value_states.size(1) if queries_time_length > values_time_length: query_states = query_states[:, : (queries_time_length - values_time_length), :] zeros = torch.zeros_like(query_states).float() value_states = torch.cat([value_states, zeros], dim=1) key_states = torch.cat([key_states, zeros], dim=1) else: value_states = value_states[:, :queries_time_length, :] key_states = key_states[:, :queries_time_length, :] query_states_fft = torch.fft.rfft(query_states, n=tgt_len, dim=1) key_states_fft = torch.fft.rfft(key_states, n=tgt_len, dim=1) attn_weights = query_states_fft * torch.conj(key_states_fft) attn_weights = torch.fft.irfft(attn_weights, n=tgt_len, dim=1) # Autocorrelation(Q,K) src_len = key_states.size(1) channel = key_states.size(2) if attn_weights.size() != (bsz * self.num_heads, tgt_len, channel): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, channel)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, channel) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, channel) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, channel) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, channel) else: attn_weights_reshaped = None # time delay aggregation time_length = value_states.size(1) autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel) # find top k autocorrelations delays top_k = int(self.autocorrelation_factor * math.log(time_length)) autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1)) # bsz x tgt_len if self.training: autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0) _, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k) top_k_autocorrelations = torch.stack( [autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1 ) else: top_k_autocorrelations, top_k_delays_index = torch.topk( autocorrelations_mean_on_head_channel, top_k, dim=1 ) top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1) # bsz x top_k # compute aggregation: value_states.roll(delay) * top_k_autocorrelations(delay) if not self.training: # used for compute values_states.roll(delay) in inference tmp_values = value_states.repeat(1, 2, 1) init_index = ( torch.arange(time_length) .view(1, -1, 1) .repeat(bsz * self.num_heads, 1, channel) .to(value_states.device) ) delays_agg = torch.zeros_like(value_states).float() # bsz x time_length x channel for i in range(top_k): # compute value_states roll delay if not self.training: tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat( self.num_heads, tgt_len, channel ) value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay) else: value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1) # aggregation top_k_autocorrelations_at_delay = ( top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel) ) delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay attn_output = delays_agg.contiguous() if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned across GPUs when using tensor-parallelism. 
        # time delay aggregation
        time_length = value_states.size(1)
        autocorrelations = attn_weights.view(bsz, self.num_heads, tgt_len, channel)

        # find top k autocorrelation delays
        top_k = int(self.autocorrelation_factor * math.log(time_length))
        autocorrelations_mean_on_head_channel = torch.mean(autocorrelations, dim=(1, -1))  # bsz x tgt_len
        if self.training:
            autocorrelations_mean_on_bsz = torch.mean(autocorrelations_mean_on_head_channel, dim=0)
            _, top_k_delays_index = torch.topk(autocorrelations_mean_on_bsz, top_k)
            top_k_autocorrelations = torch.stack(
                [autocorrelations_mean_on_head_channel[:, top_k_delays_index[i]] for i in range(top_k)], dim=-1
            )
        else:
            top_k_autocorrelations, top_k_delays_index = torch.topk(
                autocorrelations_mean_on_head_channel, top_k, dim=1
            )

        top_k_autocorrelations = torch.softmax(top_k_autocorrelations, dim=-1)  # bsz x top_k

        # compute aggregation: value_states.roll(delay) * top_k_autocorrelations(delay)
        if not self.training:
            # used to compute value_states.roll(delay) at inference time
            tmp_values = value_states.repeat(1, 2, 1)
            init_index = (
                torch.arange(time_length)
                .view(1, -1, 1)
                .repeat(bsz * self.num_heads, 1, channel)
                .to(value_states.device)
            )

        delays_agg = torch.zeros_like(value_states).float()  # bsz x time_length x channel
        for i in range(top_k):
            # compute value_states roll delay
            if not self.training:
                tmp_delay = init_index + top_k_delays_index[:, i].view(-1, 1, 1).repeat(
                    self.num_heads, tgt_len, channel
                )
                value_states_roll_delay = torch.gather(tmp_values, dim=1, index=tmp_delay)
            else:
                value_states_roll_delay = value_states.roll(shifts=-int(top_k_delays_index[i]), dims=1)

            # aggregation
            top_k_autocorrelations_at_delay = (
                top_k_autocorrelations[:, i].view(-1, 1, 1).repeat(self.num_heads, tgt_len, channel)
            )
            delays_agg += value_states_roll_delay * top_k_autocorrelations_at_delay

        attn_output = delays_agg.contiguous()

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output`
        # can be partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value


class AutoformerEncoderLayer(nn.Module):
    def __init__(self, config: AutoformerConfig):
        super().__init__()
        self.embed_dim = config.d_model
        self.self_attn = AutoformerAttention(
            embed_dim=self.embed_dim,
            num_heads=config.encoder_attention_heads,
            dropout=config.attention_dropout,
            autocorrelation_factor=config.autocorrelation_factor,
        )
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout
        self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
        self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = AutoformerLayernorm(config)
        self.decomp1 = AutoformerSeriesDecompositionLayer(config)
        self.decomp2 = AutoformerSeriesDecompositionLayer(config)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: torch.FloatTensor,
        layer_head_mask: torch.FloatTensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states, attn_weights, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        # added layer norm here as an improvement
        hidden_states = self.self_attn_layer_norm(hidden_states)
        hidden_states, _ = self.decomp1(hidden_states)

        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states
        hidden_states, _ = self.decomp2(hidden_states)
        hidden_states = self.final_layer_norm(hidden_states)

        if hidden_states.dtype == torch.float16 and (
            torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
        ):
            clamp_value = torch.finfo(hidden_states.dtype).max - 1000
            hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
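
# Illustrative sketch (not part of the model): the `AutoformerSeriesDecompositionLayer` used
# by the encoder/decoder layers splits a series into a smooth trend (a moving average) and a
# seasonal remainder. A minimal stand-alone version, with a hypothetical `kernel_size`, looks
# roughly like this:
def _example_series_decomposition(x: torch.Tensor, kernel_size: int = 25) -> Tuple[torch.Tensor, torch.Tensor]:
    """x: (batch, length, channels) -> (seasonal, trend), both of the same shape."""
    # replicate the edge values so the moving average preserves the sequence length
    num_pads = (kernel_size - 1) // 2
    front = x[:, :1, :].repeat(1, num_pads, 1)
    end = x[:, -1:, :].repeat(1, num_pads, 1)
    padded = torch.cat([front, x, end], dim=1)
    trend = nn.functional.avg_pool1d(padded.permute(0, 2, 1), kernel_size, stride=1).permute(0, 2, 1)
    return x - trend, trend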

class AutoformerDecoderLayer(nn.Module):
    def __init__(self, config: AutoformerConfig):
        super().__init__()
        self.embed_dim = config.d_model

        self.self_attn = AutoformerAttention(
            embed_dim=self.embed_dim,
            num_heads=config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            autocorrelation_factor=config.autocorrelation_factor,
        )
        self.dropout = config.dropout
        self.activation_fn = ACT2FN[config.activation_function]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = AutoformerAttention(
            self.embed_dim,
            config.decoder_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=True,
            autocorrelation_factor=config.autocorrelation_factor,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
        self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
        self.final_layer_norm = AutoformerLayernorm(config)

        self.decomp1 = AutoformerSeriesDecompositionLayer(config)
        self.decomp2 = AutoformerSeriesDecompositionLayer(config)
        self.decomp3 = AutoformerSeriesDecompositionLayer(config)

        # source: https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/layers/Autoformer_EncDec.py#L128
        self.trend_projection = nn.Conv1d(
            in_channels=self.embed_dim,
            out_channels=config.feature_size,
            kernel_size=3,
            stride=1,
            padding=1,
            padding_mode="circular",
            bias=False,
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            encoder_hidden_states (`torch.FloatTensor`):
                cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
            encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
                `(encoder_attention_heads,)`.
            cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
                size `(decoder_attention_heads,)`.
            past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*, defaults to `True`):
                Whether or not the model should return the `present_key_value` state to be used for subsequent
                decoding.
""" residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states, trend1 = self.decomp1(hidden_states) # added layer norm here as an improvement hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states, trend2 = self.decomp2(hidden_states) # added layer norm here as an improvement hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states, trend3 = self.decomp3(hidden_states) hidden_states = self.final_layer_norm(hidden_states) if encoder_hidden_states is not None: residual_trend = trend1 + trend2 + trend3 else: residual_trend = trend1 + trend3 residual_trend = self.trend_projection(residual_trend.permute(0, 2, 1)).transpose(1, 2) outputs = ((hidden_states, residual_trend),) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs class AutoformerPreTrainedModel(PreTrainedModel): config_class = AutoformerConfig base_model_prefix = "model" main_input_name = "past_values" supports_gradient_checkpointing = True def _init_weights(self, module): std = self.config.init_std if isinstance(module, (nn.Linear, nn.Conv1d)): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, AutoformerSinusoidalPositionalEmbedding): pass elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() AUTOFORMER_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. 

class AutoformerPreTrainedModel(PreTrainedModel):
    config_class = AutoformerConfig
    base_model_prefix = "model"
    main_input_name = "past_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, AutoformerSinusoidalPositionalEmbedding):
            pass
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


AUTOFORMER_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
    usage and behavior.

    Parameters:
        config ([`AutoformerConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""


AUTOFORMER_INPUTS_DOCSTRING = r"""
    Args:
        past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Past values of the time series, that serve as context in order to predict the future. These values may
            contain lags, i.e. additional values from the past which are added in order to serve as "extra context".
            The `past_values` is what the Transformer encoder gets as input (with optional additional features, such
            as `static_categorical_features`, `static_real_features`, `past_time_features`).

            The sequence length here is equal to `context_length` + `max(config.lags_sequence)`.

            Missing values need to be replaced with zeros.
        past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`, *optional*):
            Optional time features, which the model internally will add to `past_values`. These could be things like
            "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features). These
            could also be so-called "age" features, which basically help the model know "at which point in life" a
            time-series is. Age features have small values for distant past time steps and increase monotonically
            the more we approach the current time step.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
            where the position encodings are learned from scratch internally as parameters of the model, the Time
            Series Transformer requires to provide additional time features.

            The Autoformer only learns additional embeddings for `static_categorical_features`.
        past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Boolean mask to indicate which `past_values` were observed and which were missing. Mask values selected
            in `[0, 1]`:

            - 1 for values that are **observed**,
            - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros).

        static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*):
            Optional static categorical features for which the model will learn an embedding, which it will add to
            the values of the time series.

            Static categorical features are features which have the same value for all time steps (static over
            time).

            A typical example of a static categorical feature is a time series ID.
        static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*):
            Optional static real features which the model will add to the values of the time series.

            Static real features are features which have the same value for all time steps (static over time).

            A typical example of a static real feature is promotion information.
        future_values (`torch.FloatTensor` of shape `(batch_size, prediction_length)`):
            Future values of the time series, that serve as labels for the model. The `future_values` is what the
            Transformer needs to learn to output, given the `past_values`.

            See the demo notebook and code snippets for details.

            Missing values need to be replaced with zeros.
        future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`, *optional*):
            Optional time features, which the model internally will add to `future_values`. These could be things
            like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier features).
            These could also be so-called "age" features, which basically help the model know "at which point in
            life" a time-series is. Age features have small values for distant past time steps and increase
            monotonically the more we approach the current time step.

            These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
            where the position encodings are learned from scratch internally as parameters of the model, the Time
            Series Transformer requires to provide additional features.

            The Autoformer only learns additional embeddings for `static_categorical_features`.
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on certain token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Mask to avoid performing attention on certain token indices. By default, a causal mask will be used, to
            make sure the model can only look at previous inputs in order to predict the future.
        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*):
            Tuple consists of `last_hidden_state`, `hidden_states` (*optional*) and `attentions` (*optional*)
            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` (*optional*) is a sequence of
            hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the
            decoder.
        past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
            `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
            Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
            cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
            that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
            all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
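
# Illustrative sketch (not part of the model): how the expected input lengths relate to the
# configuration. `past_values` must cover `context_length` plus the largest lag, so a batch
# can be sanity-checked like this (the config values here are hypothetical):
def _example_expected_input_lengths() -> Tuple[torch.Tensor, torch.Tensor]:
    config = AutoformerConfig(prediction_length=24, context_length=48, lags_sequence=[1, 2, 3, 4, 5, 6, 7])
    past_length = config.context_length + max(config.lags_sequence)  # 48 + 7 = 55
    past_values = torch.randn(2, past_length)  # (batch_size, past_length)
    future_values = torch.randn(2, config.prediction_length)  # (batch_size, prediction_length)
    return past_values, future_values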

# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesTransformerEncoder with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
class AutoformerEncoder(AutoformerPreTrainedModel):
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    [`AutoformerEncoderLayer`].

    Args:
        config: AutoformerConfig
    """

    def __init__(self, config: AutoformerConfig):
        super().__init__(config)

        self.dropout = config.dropout
        self.layerdrop = config.encoder_layerdrop
        if config.prediction_length is None:
            raise ValueError("The `prediction_length` config needs to be specified.")

        self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
        self.embed_positions = AutoformerSinusoidalPositionalEmbedding(
            config.context_length + config.prediction_length, config.d_model
        )
        self.layers = nn.ModuleList([AutoformerEncoderLayer(config) for _ in range(config.encoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        r"""
        Args:
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
        """
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        hidden_states = self.value_embedding(inputs_embeds)
        embed_pos = self.embed_positions(inputs_embeds.size())

        hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)

        encoder_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            if head_mask.size()[0] != (len(self.layers)):
                raise ValueError(
                    f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
                    f" {head_mask.size()[0]}."
                )

        for idx, encoder_layer in enumerate(self.layers):
            if output_hidden_states:
                encoder_states = encoder_states + (hidden_states,)
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            to_drop = False
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:  # skip the layer
                    to_drop = True

            if to_drop:
                layer_outputs = (None, None)
            else:
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        encoder_layer.__call__,
                        hidden_states,
                        attention_mask,
                        (head_mask[idx] if head_mask is not None else None),
                        output_attentions,
                    )
                else:
                    layer_outputs = encoder_layer(
                        hidden_states,
                        attention_mask,
                        layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                        output_attentions=output_attentions,
                    )

                hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        if output_hidden_states:
            encoder_states = encoder_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
        )


class AutoformerDecoder(AutoformerPreTrainedModel):
    """
    Transformer decoder consisting of `config.decoder_layers` layers.
    Each layer is a [`AutoformerDecoderLayer`].

    Args:
        config: AutoformerConfig
    """

    def __init__(self, config: AutoformerConfig):
        super().__init__(config)
        self.dropout = config.dropout
        self.layerdrop = config.decoder_layerdrop
        if config.prediction_length is None:
            raise ValueError("The `prediction_length` config needs to be specified.")

        self.value_embedding = AutoformerValueEmbedding(feature_size=config.feature_size, d_model=config.d_model)
        self.embed_positions = AutoformerSinusoidalPositionalEmbedding(
            config.context_length + config.prediction_length, config.d_model
        )
        self.layers = nn.ModuleList([AutoformerDecoderLayer(config) for _ in range(config.decoder_layers)])
        self.layernorm_embedding = nn.LayerNorm(config.d_model)

        # https://github.com/thuml/Autoformer/blob/e6371e24f2ae2dd53e472edefdd5814c5176f864/models/Autoformer.py#L74
        self.seasonality_projection = nn.Linear(config.d_model, config.feature_size)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    def forward(
        self,
        trend: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        cross_attn_head_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, AutoFormerDecoderOutput]:
        r"""
        Args:
            trend (`torch.FloatTensor` of shape `(batch_size, prediction_length, feature_size)`, *optional*):
                The trend sequence to be fed to the decoder.
            attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the
                cross-attention of the decoder.
            encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
                cross-attention on hidden heads. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
            past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
                Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
                shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
                shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.

                Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
                cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential
                decoding.

                If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
                (those that don't have their past key value states given to this model) of shape `(batch_size, 1)`
                instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
            inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            use_cache (`bool`, *optional*):
                If `use_cache` is True, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_shape = inputs_embeds.size()[:-1] # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _prepare_4d_attention_mask( encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ) hidden_states = self.value_embedding(inputs_embeds) embed_pos = self.embed_positions( inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length ) hidden_states = self.layernorm_embedding(hidden_states + embed_pos) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) if self.training: dropout_probability = torch.rand([]) if dropout_probability < self.layerdrop: continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
        embed_pos = self.embed_positions(
            inputs_embeds.size(), past_key_values_length=self.config.context_length - self.config.label_length
        )
        hidden_states = self.layernorm_embedding(hidden_states + embed_pos)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
        next_decoder_cache = () if use_cache else None

        # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
        for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
            if attn_mask is not None:
                if attn_mask.size()[0] != (len(self.layers)):
                    raise ValueError(
                        f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
                        f" {head_mask.size()[0]}."
                    )
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            past_key_value = past_key_values[idx] if past_key_values is not None else None

            if self.gradient_checkpointing and self.training:
                if use_cache:
                    logger.warning(
                        "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                    )
                    use_cache = False
                layer_outputs = self._gradient_checkpointing_func(
                    decoder_layer.__call__,
                    hidden_states,
                    attention_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    head_mask[idx] if head_mask is not None else None,
                    cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
                    None,
                    output_attentions,
                    use_cache,
                )
            else:
                layer_outputs = decoder_layer(
                    hidden_states,
                    attention_mask=attention_mask,
                    encoder_hidden_states=encoder_hidden_states,
                    encoder_attention_mask=encoder_attention_mask,
                    layer_head_mask=(head_mask[idx] if head_mask is not None else None),
                    cross_attn_layer_head_mask=(
                        cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
                    ),
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                )

            (hidden_states, residual_trend) = layer_outputs[0]
            trend = trend + residual_trend

            if use_cache:
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)

        # project seasonality representation
        hidden_states = self.seasonality_projection(hidden_states)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        next_cache = next_decoder_cache if use_cache else None
        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, trend, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return AutoFormerDecoderOutput(
            last_hidden_state=hidden_states,
            trend=trend,
            past_key_values=next_cache,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )


@add_start_docstrings(
    "The bare Autoformer Model outputting raw hidden-states without any specific head on top.",
    AUTOFORMER_START_DOCSTRING,
)
class AutoformerModel(AutoformerPreTrainedModel):
    def __init__(self, config: AutoformerConfig):
        super().__init__(config)

        if config.scaling == "mean" or config.scaling is True:
            self.scaler = AutoformerMeanScaler(config)
        elif config.scaling == "std":
            self.scaler = AutoformerStdScaler(config)
        else:
            self.scaler = AutoformerNOPScaler(config)

        if config.num_static_categorical_features > 0:
            self.embedder = AutoformerFeatureEmbedder(
                cardinalities=config.cardinality, embedding_dims=config.embedding_dimension
            )

        # transformer encoder-decoder and mask initializer
        self.encoder = AutoformerEncoder(config)
        self.decoder = AutoformerDecoder(config)

        # used for decoder seasonal and trend initialization
        self.decomposition_layer = AutoformerSeriesDecompositionLayer(config)

        # Initialize weights and apply final processing
        self.post_init()

    @property
    def _past_length(self) -> int:
        return self.config.context_length + max(self.config.lags_sequence)

    def get_lagged_subsequences(
        self, sequence: torch.Tensor, subsequences_length: int, shift: int = 0
    ) -> torch.Tensor:
        """
        Returns lagged subsequences of a given sequence. Returns a tensor of shape (batch_size, subsequences_length,
        feature_size, indices_length), containing lagged subsequences. Specifically, lagged[i, j, :, k] = sequence[i,
        -indices[k]-subsequences_length+j, :].

        Args:
            sequence (`torch.Tensor` of shape `(batch_size, context_length, feature_size)`):
                The sequence from which lagged subsequences should be extracted.
            subsequences_length (`int`):
                Length of the subsequences to be extracted.
            shift (`int`, *optional*, defaults to 0):
                Shift the lags by this amount back in the time index.
""" # calculates the indices of the lags by subtracting the shift value from the given lags_sequence indices = [lag - shift for lag in self.config.lags_sequence] # checks if the maximum lag plus the length of the subsequences exceeds the length of the input sequence sequence_length = sequence.shape[1] if max(indices) + subsequences_length > sequence_length: raise ValueError( f"lags cannot go further than history length, found lag {max(indices)} " f"while history length is only {sequence_length}" ) # extracts the lagged subsequences from the input sequence using the calculated indices lagged_values = [] for lag_index in indices: begin_index = -lag_index - subsequences_length end_index = -lag_index if lag_index > 0 else None lagged_values.append(sequence[:, begin_index:end_index, ...]) # return as stacked tensor in the feature dimension return torch.stack(lagged_values, dim=-1) def create_network_inputs( self, past_values: torch.Tensor, past_time_features: torch.Tensor, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, past_observed_mask: Optional[torch.Tensor] = None, future_values: Optional[torch.Tensor] = None, future_time_features: Optional[torch.Tensor] = None, ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ Creates the inputs for the network given the past and future values, time features, and static features. Args: past_values (`torch.Tensor`): A tensor of shape `(batch_size, past_length, input_size)` containing the past values. past_time_features (`torch.Tensor`): A tensor of shape `(batch_size, past_length, num_features)` containing the past time features. static_categorical_features (`Optional[torch.Tensor]`): An optional tensor of shape `(batch_size, num_categorical_features)` containing the static categorical features. static_real_features (`Optional[torch.Tensor]`): An optional tensor of shape `(batch_size, num_real_features)` containing the static real features. past_observed_mask (`Optional[torch.Tensor]`): An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed values in the past. future_values (`Optional[torch.Tensor]`): An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values. Returns: A tuple containing the following tensors: - reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags * input_size)` containing the lagged subsequences of the inputs. - features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing the concatenated static and time features. - loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input values. - scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input values. - static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the concatenated static features. """ # time feature time_feat = ( torch.cat( ( past_time_features[:, self._past_length - self.config.context_length :, ...], future_time_features, ), dim=1, ) if future_values is not None else past_time_features[:, self._past_length - self.config.context_length :, ...] 
    def create_network_inputs(
        self,
        past_values: torch.Tensor,
        past_time_features: torch.Tensor,
        static_categorical_features: Optional[torch.Tensor] = None,
        static_real_features: Optional[torch.Tensor] = None,
        past_observed_mask: Optional[torch.Tensor] = None,
        future_values: Optional[torch.Tensor] = None,
        future_time_features: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Creates the inputs for the network given the past and future values, time features, and static features.

        Args:
            past_values (`torch.Tensor`):
                A tensor of shape `(batch_size, past_length, input_size)` containing the past values.
            past_time_features (`torch.Tensor`):
                A tensor of shape `(batch_size, past_length, num_features)` containing the past time features.
            static_categorical_features (`Optional[torch.Tensor]`):
                An optional tensor of shape `(batch_size, num_categorical_features)` containing the static
                categorical features.
            static_real_features (`Optional[torch.Tensor]`):
                An optional tensor of shape `(batch_size, num_real_features)` containing the static real features.
            past_observed_mask (`Optional[torch.Tensor]`):
                An optional tensor of shape `(batch_size, past_length, input_size)` containing the mask of observed
                values in the past.
            future_values (`Optional[torch.Tensor]`):
                An optional tensor of shape `(batch_size, future_length, input_size)` containing the future values.

        Returns:
            A tuple containing the following tensors:
            - reshaped_lagged_sequence (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_lags *
              input_size)` containing the lagged subsequences of the inputs.
            - features (`torch.Tensor`): A tensor of shape `(batch_size, sequence_length, num_features)` containing
              the concatenated static and time features.
            - loc (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the mean of the input
              values.
            - scale (`torch.Tensor`): A tensor of shape `(batch_size, input_size)` containing the std of the input
              values.
            - static_feat (`torch.Tensor`): A tensor of shape `(batch_size, num_static_features)` containing the
              concatenated static features.
        """
        # time feature
        time_feat = (
            torch.cat(
                (
                    past_time_features[:, self._past_length - self.config.context_length :, ...],
                    future_time_features,
                ),
                dim=1,
            )
            if future_values is not None
            else past_time_features[:, self._past_length - self.config.context_length :, ...]
        )

        # target
        if past_observed_mask is None:
            past_observed_mask = torch.ones_like(past_values)

        context = past_values[:, -self.config.context_length :]
        observed_context = past_observed_mask[:, -self.config.context_length :]
        _, loc, scale = self.scaler(context, observed_context)

        inputs = (
            (torch.cat((past_values, future_values), dim=1) - loc) / scale
            if future_values is not None
            else (past_values - loc) / scale
        )
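        # The location and scale used for normalization are themselves informative, so they are
        # exposed to the network as static real features below: log1p(|loc|) keeps the magnitude
        # of the (possibly zero or negative) mean numerically well-behaved, and log(scale)
        # compresses the positive scale to the real line.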
static_categorical_features=batch["static_categorical_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) >>> last_hidden_state = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_inputs, temporal_features, loc, scale, static_feat = self.create_network_inputs( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, ) if encoder_outputs is None: enc_input = torch.cat( ( transformer_inputs[:, : self.config.context_length, ...], temporal_features[:, : self.config.context_length, ...], ), dim=-1, ) encoder_outputs = self.encoder( inputs_embeds=enc_input, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) if future_values is not None: # Decoder inputs # seasonality and trend from context length seasonal_input, trend_input = self.decomposition_layer( transformer_inputs[:, : self.config.context_length, ...] 
            mean = (
                torch.mean(transformer_inputs[:, : self.config.context_length, ...], dim=1)
                .unsqueeze(1)
                .repeat(1, self.config.prediction_length, 1)
            )
            # use `transformer_inputs.device` here: `enc_input` is only defined when the
            # encoder was run in this call, not when `encoder_outputs` was passed in
            zeros = torch.zeros(
                [transformer_inputs.shape[0], self.config.prediction_length, transformer_inputs.shape[2]],
                device=transformer_inputs.device,
            )
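            # The decoder is seeded with the last `label_length` steps of the context: the
            # seasonal part is padded with zeros over the prediction horizon (to be filled in
            # by the decoder), while the trend part is padded with the context mean as a simple
            # first guess of the future trend.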
            decoder_input = torch.cat(
                (
                    torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1),
                    temporal_features[:, self.config.context_length - self.config.label_length :, ...],
                ),
                dim=-1,
            )
            trend_init = torch.cat(
                (
                    torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1),
                    temporal_features[:, self.config.context_length - self.config.label_length :, ...],
                ),
                dim=-1,
            )

            decoder_outputs = self.decoder(
                trend=trend_init,
                inputs_embeds=decoder_input,
                attention_mask=decoder_attention_mask,
                encoder_hidden_states=encoder_outputs[0],
                head_mask=decoder_head_mask,
                cross_attn_head_mask=cross_attn_head_mask,
                past_key_values=past_key_values,
                use_cache=use_cache,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        else:
            decoder_outputs = AutoFormerDecoderOutput()

        if not return_dict:
            return decoder_outputs + encoder_outputs + (loc, scale, static_feat)

        return AutoformerModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            trend=decoder_outputs.trend,
            past_key_values=decoder_outputs.past_key_values,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            loc=loc,
            scale=scale,
            static_features=static_feat,
        )
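
# Illustrative sketch (not part of the model): `AutoformerForPrediction` below trains with a
# negative log-likelihood loss, masked by `future_observed_mask` and averaged with
# `weighted_average`. The essential computation, written out with plain torch and a
# hypothetical Student-T parametrization, is roughly:
def _example_masked_nll(df, loc, scale, target, observed_mask) -> torch.Tensor:
    """df/loc/scale/target/observed_mask: tensors of shape (batch, prediction_length)."""
    distribution = torch.distributions.StudentT(df, loc, scale)
    nll = -distribution.log_prob(target)
    # zero out unobserved steps, then average over the observed ones only
    weighted = nll * observed_mask
    return weighted.sum() / observed_mask.sum().clamp_min(1.0)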
past_time_features=batch["past_time_features"], ... past_observed_mask=batch["past_observed_mask"], ... static_categorical_features=batch["static_categorical_features"], ... static_real_features=batch["static_real_features"], ... future_values=batch["future_values"], ... future_time_features=batch["future_time_features"], ... ) ``` </Tip> """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if future_values is not None: use_cache = False outputs = self.model( past_values=past_values, past_time_features=past_time_features, past_observed_mask=past_observed_mask, static_categorical_features=static_categorical_features, static_real_features=static_real_features, future_values=future_values, future_time_features=future_time_features, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions, use_cache=use_cache, return_dict=return_dict, ) prediction_loss = None params = None if future_values is not None: # outputs.last_hidden_state and trend # loc is 4rd last and scale is 3rd last output params = self.output_params(outputs[0] + outputs[1]) distribution = self.output_distribution(params, loc=outputs[-3], scale=outputs[-2]) loss = self.loss(distribution, future_values) if future_observed_mask is None: future_observed_mask = torch.ones_like(future_values) if len(self.target_shape) == 0: loss_weights = future_observed_mask else: loss_weights, _ = future_observed_mask.min(dim=-1, keepdim=False) prediction_loss = weighted_average(loss, weights=loss_weights) if not return_dict: outputs = ((params,) + outputs[2:]) if params is not None else outputs[2:] return ((prediction_loss,) + outputs) if prediction_loss is not None else outputs return Seq2SeqTSPredictionOutput( loss=prediction_loss, params=params, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, loc=outputs.loc, scale=outputs.scale, static_features=outputs.static_features, ) @torch.no_grad() def generate( self, past_values: torch.Tensor, past_time_features: torch.Tensor, future_time_features: torch.Tensor, past_observed_mask: Optional[torch.Tensor] = None, static_categorical_features: Optional[torch.Tensor] = None, static_real_features: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> SampleTSPredictionOutput: r""" Greedily generate sequences of sample predictions from a model with a probability distribution head. Parameters: past_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`): Past values of the time series, that serve as context in order to predict the future. The sequence size of this tensor must be larger than the `context_length` of the model, since the model will use the larger size to construct lag features, i.e. additional values from the past which are added in order to serve as "extra context". 
                The `sequence_length` here is equal to `config.context_length` + `max(config.lags_sequence)`, which
                if no `lags_sequence` is configured, is equal to `config.context_length` + 7 (as by default, the
                largest look-back index in `config.lags_sequence` is 7). The property `_past_length` returns the
                actual length of the past.

                The `past_values` is what the Transformer encoder gets as input (with optional additional features,
                such as `static_categorical_features`, `static_real_features`, `past_time_features` and lags).

                Optionally, missing values need to be replaced with zeros and indicated via the
                `past_observed_mask`.

                For multivariate time series, the `input_size` > 1 dimension is required and corresponds to the
                number of variates in the time series per time step.
            past_time_features (`torch.FloatTensor` of shape `(batch_size, sequence_length, num_features)`):
                Required time features, which the model internally will add to `past_values`. These could be things
                like "month of year", "day of the month", etc. encoded as vectors (for instance as Fourier
                features). These could also be so-called "age" features, which basically help the model know "at
                which point in life" a time-series is. Age features have small values for distant past time steps
                and increase monotonically the more we approach the current time step. Holiday features are also a
                good example of time features.

                These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
                where the position encodings are learned from scratch internally as parameters of the model, the
                Time Series Transformer requires to provide additional time features. The Time Series Transformer
                only learns additional embeddings for `static_categorical_features`.

                Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
                features must be known at prediction time.

                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
            future_time_features (`torch.FloatTensor` of shape `(batch_size, prediction_length, num_features)`):
                Required time features for the prediction window, which the model internally will add to sampled
                predictions. These could be things like "month of year", "day of the month", etc. encoded as vectors
                (for instance as Fourier features). These could also be so-called "age" features, which basically
                help the model know "at which point in life" a time-series is. Age features have small values for
                distant past time steps and increase monotonically the more we approach the current time step.
                Holiday features are also a good example of time features.

                These features serve as the "positional encodings" of the inputs. So contrary to a model like BERT,
                where the position encodings are learned from scratch internally as parameters of the model, the
                Time Series Transformer requires to provide additional time features. The Time Series Transformer
                only learns additional embeddings for `static_categorical_features`.

                Additional dynamic real covariates can be concatenated to this tensor, with the caveat that these
                features must be known at prediction time.

                The `num_features` here is equal to `config.num_time_features` + `config.num_dynamic_real_features`.
            past_observed_mask (`torch.BoolTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, input_size)`, *optional*):
                Boolean mask to indicate which `past_values` were observed and which were missing.
Mask values selected in `[0, 1]`: - 1 for values that are **observed**, - 0 for values that are **missing** (i.e. NaNs that were replaced by zeros). static_categorical_features (`torch.LongTensor` of shape `(batch_size, number of static categorical features)`, *optional*): Optional static categorical features for which the model will learn an embedding, which it will add to the values of the time series. Static categorical features are features which have the same value for all time steps (static over time). A typical example of a static categorical feature is a time series ID. static_real_features (`torch.FloatTensor` of shape `(batch_size, number of static real features)`, *optional*): Optional static real features which the model will add to the values of the time series. Static real features are features which have the same value for all time steps (static over time). A typical example of a static real feature is promotion information. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. Return: [`SampleTSPredictionOutput`] where the outputs `sequences` tensor will have shape `(batch_size, number of samples, prediction_length)` or `(batch_size, number of samples, prediction_length, input_size)` for multivariate predictions. """ outputs = self( static_categorical_features=static_categorical_features, static_real_features=static_real_features, past_time_features=past_time_features, past_values=past_values, past_observed_mask=past_observed_mask, future_time_features=None, future_values=None, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, use_cache=False, ) decoder = self.model.get_decoder() enc_last_hidden = outputs.encoder_last_hidden_state loc = outputs.loc scale = outputs.scale static_feat = outputs.static_features num_parallel_samples = self.config.num_parallel_samples repeated_loc = loc.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_scale = scale.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_past_values = ( past_values.repeat_interleave(repeats=num_parallel_samples, dim=0) - repeated_loc ) / repeated_scale time_features = torch.cat((past_time_features, future_time_features), dim=1) expanded_static_feat = static_feat.unsqueeze(1).expand(-1, time_features.shape[1], -1) features = torch.cat((expanded_static_feat, time_features), dim=-1) repeated_features = features.repeat_interleave(repeats=num_parallel_samples, dim=0) repeated_enc_last_hidden = enc_last_hidden.repeat_interleave(repeats=num_parallel_samples, dim=0) lagged_sequence = self.model.get_lagged_subsequences( sequence=repeated_past_values, subsequences_length=self.config.context_length ) lags_shape = lagged_sequence.shape reshaped_lagged_sequence = lagged_sequence.reshape(lags_shape[0], lags_shape[1], -1) seasonal_input, trend_input = self.model.decomposition_layer(reshaped_lagged_sequence) mean = torch.mean(reshaped_lagged_sequence, dim=1).unsqueeze(1).repeat(1, self.config.prediction_length, 1) zeros = torch.zeros( [reshaped_lagged_sequence.shape[0], self.config.prediction_length, reshaped_lagged_sequence.shape[2]], device=reshaped_lagged_sequence.device, ) decoder_input = torch.cat( ( torch.cat((seasonal_input[:, -self.config.label_length :, ...], zeros), dim=1), repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...], ), dim=-1, ) trend_init = 
torch.cat( ( torch.cat((trend_input[:, -self.config.label_length :, ...], mean), dim=1), repeated_features[:, -self.config.prediction_length - self.config.label_length :, ...], ), dim=-1, ) decoder_outputs = decoder( trend=trend_init, inputs_embeds=decoder_input, encoder_hidden_states=repeated_enc_last_hidden ) decoder_last_hidden = decoder_outputs.last_hidden_state trend = decoder_outputs.trend params = self.output_params(decoder_last_hidden + trend) distr = self.output_distribution(params, loc=repeated_loc, scale=repeated_scale) future_samples = distr.sample() return SampleTSPredictionOutput( sequences=future_samples.reshape( (-1, num_parallel_samples, self.config.prediction_length) + self.target_shape, ) )
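For orientation, here is a minimal usage sketch of the `generate` method above. Everything in it is an illustrative assumption — a freshly initialized toy config rather than a trained checkpoint, and random tensors for the inputs — so treat it as a shape walkthrough, not a recipe:

```python
import torch

from transformers import AutoformerConfig, AutoformerForPrediction

# Toy, untrained setup (assumed values); real usage would load a trained checkpoint.
config = AutoformerConfig(prediction_length=24, context_length=48, num_time_features=2)
model = AutoformerForPrediction(config)

batch_size = 2
# The past window must cover context_length plus the largest lag (see `_past_length` above).
past_length = config.context_length + max(config.lags_sequence)

past_values = torch.randn(batch_size, past_length)
past_time_features = torch.randn(batch_size, past_length, config.num_time_features)
past_observed_mask = torch.ones(batch_size, past_length)
future_time_features = torch.randn(batch_size, config.prediction_length, config.num_time_features)

samples = model.generate(
    past_values=past_values,
    past_time_features=past_time_features,
    past_observed_mask=past_observed_mask,
    future_time_features=future_time_features,
)
# sequences: (batch_size, config.num_parallel_samples, config.prediction_length)
print(samples.sequences.shape)
```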
transformers/src/transformers/models/autoformer/modeling_autoformer.py/0
{ "file_path": "transformers/src/transformers/models/autoformer/modeling_autoformer.py", "repo_id": "transformers", "token_count": 45394 }
344
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script converts a lm-head checkpoint from the "Token Dropping" implementation into a PyTorch-compatible BERT
model. The official implementation of "Token Dropping" can be found in the TensorFlow Models repository:

https://github.com/tensorflow/models/tree/master/official/projects/token_dropping
"""

import argparse

import tensorflow as tf
import torch

from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertPooler,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.utils import logging


logging.set_verbosity_info()


def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index,
            "_value_dense/bias",
self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model.",
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
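One convention in the helpers above deserves a note: every array whose name contains `"kernel"` is transposed before being loaded. That is because TF Keras `Dense` layers store kernels as `(in_features, out_features)`, while `torch.nn.Linear.weight` is `(out_features, in_features)`. A small sketch of the mismatch (the sizes are made-up examples):

```python
import numpy as np
import torch

in_features, out_features = 768, 3072  # hypothetical sizes, e.g. a BERT intermediate layer

# A TF-style Dense kernel as it would come out of a checkpoint: (in, out)
tf_kernel = np.random.randn(in_features, out_features).astype(np.float32)

linear = torch.nn.Linear(in_features, out_features)
print(linear.weight.shape)  # torch.Size([3072, 768]), i.e. (out, in)

# Transposing makes the TF kernel match the PyTorch layout
linear.weight.data = torch.from_numpy(tf_kernel.transpose())
assert linear.weight.shape == (out_features, in_features)
```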
transformers/src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/bert/convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 3033 }
345
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BiT model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`BitModel`]. It is used to instantiate a BiT
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with
    the defaults will yield a similar configuration to that of the BiT
    [google/bit-50](https://huggingface.co/google/bit-50) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        num_channels (`int`, *optional*, defaults to 3):
            The number of input channels.
        embedding_size (`int`, *optional*, defaults to 64):
            Dimensionality (hidden size) for the embedding layer.
        hidden_sizes (`List[int]`, *optional*, defaults to `[256, 512, 1024, 2048]`):
            Dimensionality (hidden size) at each stage.
        depths (`List[int]`, *optional*, defaults to `[3, 4, 6, 3]`):
            Depth (number of layers) for each stage.
        layer_type (`str`, *optional*, defaults to `"preactivation"`):
            The layer to use; it can be either `"preactivation"` or `"bottleneck"`.
        hidden_act (`str`, *optional*, defaults to `"relu"`):
            The non-linear activation function in each block. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"`
            are supported.
        global_padding (`str`, *optional*):
            Padding strategy to use for the convolutional layers. Can be either `"valid"`, `"same"`, or `None`.
        num_groups (`int`, *optional*, defaults to 32):
            Number of groups used for the `BitGroupNormActivation` layers.
        drop_path_rate (`float`, *optional*, defaults to 0.0):
            The drop path rate for the stochastic depth.
        embedding_dynamic_padding (`bool`, *optional*, defaults to `False`):
            Whether or not to make use of dynamic padding for the embedding layer.
        output_stride (`int`, *optional*, defaults to 32):
            The output stride of the model.
        width_factor (`int`, *optional*, defaults to 1):
            The width factor for the model.
        out_features (`List[str]`, *optional*):
            If used as backbone, list of features to output. Can be any of `"stem"`, `"stage1"`, `"stage2"`, etc.
            (depending on how many stages the model has). If unset and `out_indices` is set, will default to the
            corresponding stages. If unset and `out_indices` is unset, will default to the last stage. Must be in the
            same order as defined in the `stage_names` attribute.
        out_indices (`List[int]`, *optional*):
            If used as backbone, list of indices of features to output. Can be any of 0, 1, 2, etc. (depending on how
            many stages the model has). If unset and `out_features` is set, will default to the corresponding stages.
If unset and `out_features` is unset, will default to the last stage. Must be in the same order as defined in the `stage_names` attribute. Example: ```python >>> from transformers import BitConfig, BitModel >>> # Initializing a BiT bit-50 style configuration >>> configuration = BitConfig() >>> # Initializing a model (with random weights) from the bit-50 style configuration >>> model = BitModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` """ model_type = "bit" layer_types = ["preactivation", "bottleneck"] supported_padding = ["SAME", "VALID"] def __init__( self, num_channels=3, embedding_size=64, hidden_sizes=[256, 512, 1024, 2048], depths=[3, 4, 6, 3], layer_type="preactivation", hidden_act="relu", global_padding=None, num_groups=32, drop_path_rate=0.0, embedding_dynamic_padding=False, output_stride=32, width_factor=1, out_features=None, out_indices=None, **kwargs, ): super().__init__(**kwargs) if layer_type not in self.layer_types: raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}") if global_padding is not None: if global_padding.upper() in self.supported_padding: global_padding = global_padding.upper() else: raise ValueError(f"Padding strategy {global_padding} not supported") self.num_channels = num_channels self.embedding_size = embedding_size self.hidden_sizes = hidden_sizes self.depths = depths self.layer_type = layer_type self.hidden_act = hidden_act self.global_padding = global_padding self.num_groups = num_groups self.drop_path_rate = drop_path_rate self.embedding_dynamic_padding = embedding_dynamic_padding self.output_stride = output_stride self.width_factor = width_factor self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)] self._out_features, self._out_indices = get_aligned_output_features_output_indices( out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )
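To make the `out_features`/`out_indices` alignment at the end of `__init__` concrete, here is a small sketch; the chosen stages are arbitrary examples, not recommended settings:

```python
from transformers import BitConfig

# depths=[3, 4, 6, 3] gives stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
config = BitConfig(depths=[3, 4, 6, 3], out_features=["stage1", "stage3"])

print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
print(config.out_features)  # ['stage1', 'stage3']
print(config.out_indices)   # indices aligned to stage_names, i.e. [1, 3]
```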
transformers/src/transformers/models/bit/configuration_bit.py/0
{ "file_path": "transformers/src/transformers/models/bit/configuration_bit.py", "repo_id": "transformers", "token_count": 2352 }
346
# coding=utf-8
# Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 BlenderbotSmall model."""

from __future__ import annotations

import random
from typing import List, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFBaseModelOutputWithPastAndCrossAttentions,
    TFSeq2SeqLMOutput,
    TFSeq2SeqModelOutput,
)

# Public API
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFPreTrainedModel,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_blenderbot_small import BlenderbotSmallConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/blenderbot_small-90M"
_CONFIG_FOR_DOC = "BlenderbotSmallConfig"

LARGE_NEGATIVE = -1e8


# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
    pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
    decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
    start_tokens = tf.fill(
        (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
    )
    shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
    # replace possible -100 values in labels by `pad_token_id`
    shifted_input_ids = tf.where(
        shifted_input_ids == -100,
        tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
        shifted_input_ids,
    )

    # Verify that `labels` has only positive values and -100
    assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))

    # Make sure the assertion op is called by wrapping the result in an identity no-op
    with tf.control_dependencies([assert_gte0]):
        shifted_input_ids = tf.identity(shifted_input_ids)

    return shifted_input_ids


# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
    """
    Make causal mask used for uni-directional (decoder) self-attention.
""" bsz = input_ids_shape[0] tgt_len = input_ids_shape[1] mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.blenderbot.modeling_tf_blenderbot.TFBlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall class TFBlenderbotSmallLearnedPositionalEmbedding(keras.layers.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): super().__init__(num_embeddings, embedding_dim, **kwargs) def call( self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None ): """Input is expected to be of size [bsz x seqlen].""" if position_ids is None: seq_len = input_shape[1] position_ids = tf.range(seq_len, delta=1, name="range") position_ids += past_key_values_length return super().call(tf.cast(position_ids, dtype=tf.int32)) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->BlenderbotSmall class TFBlenderbotSmallAttention(keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: tf.Tensor | None = None, past_key_value: Tuple[Tuple[tf.Tensor]] | None = None, attention_mask: tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor | None]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {shape_list(attn_weights)}" ), ) if attention_mask is not None: tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {shape_list(attention_mask)}" ), ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = stable_softmax(attn_weights, axis=-1) if layer_head_mask is not None: tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=( f"Head mask for a single layer should be of size {(self.num_heads)}, but is" f" {shape_list(layer_head_mask)}" ), ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {shape_list(attn_output)}" ), ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "k_proj", None) is not None: with tf.name_scope(self.k_proj.name): self.k_proj.build([None, None, self.embed_dim]) if getattr(self, "q_proj", None) is not None: with tf.name_scope(self.q_proj.name): self.q_proj.build([None, None, self.embed_dim]) if getattr(self, "v_proj", None) is not None: with tf.name_scope(self.v_proj.name): self.v_proj.build([None, None, self.embed_dim]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.embed_dim]) # Copied from transformers.models.bart.modeling_tf_bart.TFBartEncoderLayer with Bart->BlenderbotSmall class TFBlenderbotSmallEncoderLayer(keras.layers.Layer): def __init__(self, config: BlenderbotSmallConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBlenderbotSmallAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, 
name="self_attn" ) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None, layer_head_mask: tf.Tensor | None, training: Optional[bool] = False, ) -> tf.Tensor: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)` """ residual = hidden_states hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) return hidden_states, self_attn_weights def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attn", None) is not None: with tf.name_scope(self.self_attn.name): self.self_attn.build(None) if getattr(self, "self_attn_layer_norm", None) is not None: with tf.name_scope(self.self_attn_layer_norm.name): self.self_attn_layer_norm.build([None, None, self.embed_dim]) if getattr(self, "fc1", None) is not None: with tf.name_scope(self.fc1.name): self.fc1.build([None, None, self.embed_dim]) if getattr(self, "fc2", None) is not None: with tf.name_scope(self.fc2.name): self.fc2.build([None, None, self.config.encoder_ffn_dim]) if getattr(self, "final_layer_norm", None) is not None: with tf.name_scope(self.final_layer_norm.name): self.final_layer_norm.build([None, None, self.embed_dim]) # Copied from transformers.models.bart.modeling_tf_bart.TFBartDecoderLayer with Bart->BlenderbotSmall class TFBlenderbotSmallDecoderLayer(keras.layers.Layer): def __init__(self, config: BlenderbotSmallConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFBlenderbotSmallAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") 
self.encoder_attn = TFBlenderbotSmallAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") self.config = config def call( self, hidden_states: tf.Tensor, attention_mask: np.ndarray | tf.Tensor | None = None, encoder_hidden_states: np.ndarray | tf.Tensor | None = None, encoder_attention_mask: np.ndarray | tf.Tensor | None = None, layer_head_mask: tf.Tensor | None = None, cross_attn_layer_head_mask: tf.Tensor | None = None, past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, training: Optional[bool] = False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`tf.Tensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module. `(decoder_attention_heads,)` past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, 
training=training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)

        return (
            hidden_states,
            self_attn_weights,
            cross_attn_weights,
            present_key_value,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attn", None) is not None:
            with tf.name_scope(self.self_attn.name):
                self.self_attn.build(None)
        if getattr(self, "self_attn_layer_norm", None) is not None:
            with tf.name_scope(self.self_attn_layer_norm.name):
                self.self_attn_layer_norm.build([None, None, self.embed_dim])
        if getattr(self, "encoder_attn", None) is not None:
            with tf.name_scope(self.encoder_attn.name):
                self.encoder_attn.build(None)
        if getattr(self, "encoder_attn_layer_norm", None) is not None:
            with tf.name_scope(self.encoder_attn_layer_norm.name):
                self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
        if getattr(self, "fc1", None) is not None:
            with tf.name_scope(self.fc1.name):
                self.fc1.build([None, None, self.embed_dim])
        if getattr(self, "fc2", None) is not None:
            with tf.name_scope(self.fc2.name):
                self.fc2.build([None, None, self.config.decoder_ffn_dim])
        if getattr(self, "final_layer_norm", None) is not None:
            with tf.name_scope(self.final_layer_norm.name):
                self.final_layer_norm.build([None, None, self.embed_dim])


class TFBlenderbotSmallPreTrainedModel(TFPreTrainedModel):
    config_class = BlenderbotSmallConfig
    base_model_prefix = "model"


BLENDERBOT_SMALL_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
      `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
      `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>

    Args:
        config ([`BlenderbotSmallConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
    Conversation example::

    ```py
    >>> from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration

    >>> mname = "facebook/blenderbot_small-90M"
    >>> model = TFBlenderbotSmallForConditionalGeneration.from_pretrained(mname)
    >>> tokenizer = AutoTokenizer.from_pretrained(mname)

    >>> UTTERANCE = "My friends are cool but they eat too many carbs."
    >>> print("Human: ", UTTERANCE)
    >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")

    >>> reply_ids = model.generate(**inputs)
    >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
    what kind of carbs do they eat? i don't know much about carbs.

    >>> REPLY = "I'm not sure"
    >>> print("Human: ", REPLY)
    >>> NEXT_UTTERANCE = (
    ...     "My friends are cool but they eat too many carbs.</s> "
    ...     "<s>what kind of carbs do they eat? i don't know much about carbs.</s> "
    ...     "<s>I'm not sure."
    ... )

    >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
    >>> inputs.pop("token_type_ids")
    >>> next_reply_ids = model.generate(**inputs)
    >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
    ```
"""

BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are decoder input IDs?](../glossary#decoder-input-ids)

            BlenderbotSmall uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
            `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
            `past_key_values`).
        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
        decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
            range `[0, config.max_position_embeddings - 1]`.
        head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the decoder.
            Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (`tf.FloatTensor`, *optional*):
            Sequence of hidden states at the output of the last layer of the encoder, of shape `(batch_size,
            sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`)
            contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead
            of all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`). Set to `False` during training, `True` during generation.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@keras_serializable
class TFBlenderbotSmallEncoder(keras.layers.Layer):
    config_class = BlenderbotSmallConfig
    """
    Transformer encoder consisting of *config.encoder_layers* self-attention layers. Each layer is a
    [`TFBlenderbotSmallEncoderLayer`].
    Args:
        config: BlenderbotSmallConfig
    """

    def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = keras.layers.Dropout(config.dropout)
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        self.embed_tokens = embed_tokens
        self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        self.layers = [TFBlenderbotSmallEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
        self.embed_dim = config.d_model

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        """
        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
                [`PreTrainedTokenizer.__call__`] for details.

                [What are input IDs?](../glossary#input-ids)
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
                Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
                Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
                This is useful if you want more control over how to convert `input_ids` indices into associated
                vectors than the model's internal embedding lookup matrix.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail. This argument can be used only in eager mode, in graph mode the
                value in the config will be used instead.
            output_hidden_states (`bool`, *optional*):
                Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
                for more detail. This argument can be used only in eager mode, in graph mode the value in the config
                will be used instead.
            return_dict (`bool`, *optional*):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
                in eager mode, in graph mode the value will always be set to True.
            training (`bool`, *optional*, defaults to `False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=training) # check attention mask and invert if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask) else: attention_mask = None encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: tf.debugging.assert_equal( shape_list(head_mask)[0], len(self.layers), message=( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(head_mask)[0]}." ), ) # encoder layers for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, ) if output_attentions: all_attentions += (attn,) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layernorm_embedding", None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.embed_dim]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBlenderbotSmallDecoder(keras.layers.Layer): config_class = BlenderbotSmallConfig """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`TFBlenderbotSmallDecoderLayer`] Args: config: BlenderbotSmallConfig embed_tokens: output embedding """ def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TFBlenderbotSmallLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, name="embed_positions", ) self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TFBlenderbotSmallDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.dropout = keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens @unpack_inputs def call( self, input_ids=None, inputs_embeds=None, attention_mask=None, position_ids=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, cross_attn_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" Args: input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0 if inputs_embeds is None: check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim) inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask( tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] ) if attention_mask is not None: combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1]) if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1]) # embed positions if position_ids is None: positions = self.embed_positions(input_shape, past_key_values_length) else: positions = self.embed_positions(input_shape, position_ids=position_ids) hidden_states = self.layernorm_embedding(inputs_embeds) + positions hidden_states = self.dropout(hidden_states, training=training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None present_key_values = () if use_cache else None # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]: if attn_mask is not None: tf.debugging.assert_equal( shape_list(attn_mask)[0], len(self.layers), message=( f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for" f" {shape_list(attn_mask)[0]}." 
), ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, past_key_value=past_key_value, ) if use_cache: present_key_values += (present_key_value,) if output_attentions: all_self_attns += (layer_self_attn,) if encoder_hidden_states is not None: all_cross_attns += (layer_cross_attn,) if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns else: return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attns, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embed_positions", None) is not None: with tf.name_scope(self.embed_positions.name): self.embed_positions.build(None) if getattr(self, "layernorm_embedding", None) is not None: with tf.name_scope(self.layernorm_embedding.name): self.layernorm_embedding.build([None, None, self.config.d_model]) if getattr(self, "layers", None) is not None: for layer in self.layers: with tf.name_scope(layer.name): layer.build(None) @keras_serializable class TFBlenderbotSmallMainLayer(keras.layers.Layer): config_class = BlenderbotSmallConfig def __init__(self, config: BlenderbotSmallConfig, **kwargs): super().__init__(**kwargs) self.config = config self.shared = keras.layers.Embedding( input_dim=config.vocab_size, output_dim=config.d_model, embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std), name="model.shared", ) # Additional attribute to specify the expected name scope of the layer (for loading/storing weights) self.shared.load_weight_prefix = "model.shared" self.encoder = TFBlenderbotSmallEncoder(config, self.shared, name="encoder") self.decoder = TFBlenderbotSmallDecoder(config, self.shared, name="decoder") def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared = new_embeddings self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared @unpack_inputs def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_position_ids=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, 
head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput): encoder_outputs = TFBaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False elif not return_dict and not isinstance(encoder_outputs, tuple): encoder_outputs = encoder_outputs.to_tuple() decoder_outputs = self.decoder( decoder_input_ids, attention_mask=decoder_attention_mask, position_ids=decoder_position_ids, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) if not return_dict: return decoder_outputs + encoder_outputs return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True # The shared/tied weights expect to be in the model base namespace # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than # the current one. 
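        # Illustrative note (added): because of the trailing "/", this scope is
        # treated as absolute, so the shared embedding variable is created under
        # the "model.shared" prefix at the root of the graph rather than nested
        # inside this layer's own scope — which is what checkpoints with tied
        # encoder/decoder embeddings expect when loading.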
with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"): self.shared.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "decoder", None) is not None: with tf.name_scope(self.decoder.name): self.decoder.build(None) @add_start_docstrings( "The bare BLENDERBOT_SMALL Model outputting raw hidden-states without any specific head on top.", BLENDERBOT_SMALL_START_DOCSTRING, ) class TFBlenderbotSmallModel(TFBlenderbotSmallPreTrainedModel): def __init__(self, config: BlenderbotSmallConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBlenderbotSmallMainLayer(config, name="model") def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @unpack_inputs @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, decoder_position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, decoder_head_mask: tf.Tensor | None = None, cross_attn_head_mask: tf.Tensor | None = None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values: List[tf.Tensor] | None = None, inputs_embeds: tf.Tensor | None = None, decoder_inputs_embeds: tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, **kwargs, ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]: outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, cross_attentions=cross_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) def build(self, input_shape=None): 
if self.built: return self.built = True if getattr(self, "model", None) is not None: with tf.name_scope(self.model.name): self.model.build(None) # Copied from transformers.models.bart.modeling_tf_bart.BiasLayer class BiasLayer(keras.layers.Layer): """ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis, so all weights have to be registered in a layer. """ def __init__(self, shape, initializer, trainable, name, **kwargs): super().__init__(name=name, **kwargs) # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see: # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214 self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable) def call(self, x): return x + self.bias @add_start_docstrings( "The BLENDERBOT_SMALL Model with a language modeling head. Can be used for summarization.", BLENDERBOT_SMALL_START_DOCSTRING, ) class TFBlenderbotSmallForConditionalGeneration(TFBlenderbotSmallPreTrainedModel, TFCausalLanguageModelingLoss): _keys_to_ignore_on_load_unexpected = [ r"model.encoder.embed_tokens.weight", r"model.decoder.embed_tokens.weight", ] def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFBlenderbotSmallMainLayer(config, name="model") self.use_cache = config.use_cache # final_bias_logits is registered as a buffer in pytorch, so not trainable for the sake of consistency. self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False ) def get_decoder(self): return self.model.decoder def get_encoder(self): return self.model.encoder def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def get_bias(self): return {"final_logits_bias": self.bias_layer.bias} def set_bias(self, value): # Replaces the existing layers containing bias for correct (de)serialization. 
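        # Illustrative note (added): a Keras weight's shape is fixed once built, so
        # instead of resizing the old bias in place, a fresh BiasLayer is created
        # with the incoming vocab size and the values are copied over via `assign`.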
vocab_size = value["final_logits_bias"].shape[-1] self.bias_layer = BiasLayer( name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False ) self.bias_layer.bias.assign(value["final_logits_bias"]) @unpack_inputs @add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE) def call( self, input_ids: tf.Tensor | None = None, attention_mask: tf.Tensor | None = None, decoder_input_ids: tf.Tensor | None = None, decoder_attention_mask: tf.Tensor | None = None, decoder_position_ids: tf.Tensor | None = None, head_mask: tf.Tensor | None = None, decoder_head_mask: tf.Tensor | None = None, cross_attn_head_mask: tf.Tensor | None = None, encoder_outputs: Optional[TFBaseModelOutput] = None, past_key_values: List[tf.Tensor] | None = None, inputs_embeds: tf.Tensor | None = None, decoder_inputs_embeds: tf.Tensor | None = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]: r""" labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: """ if labels is not None: labels = tf.where( labels == self.config.pad_token_id, tf.cast(tf.fill(shape_list(labels), -100), labels.dtype), labels, ) use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_position_ids=decoder_position_ids, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True) lm_logits = self.bias_layer(lm_logits) masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFSeq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, # index 1 of d outputs decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs cross_attentions=outputs.cross_attentions, # index 4 of d outputs encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out encoder_attentions=outputs.encoder_attentions, # 2 of e out ) # Copied from 
    # transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
    def serving_output(self, output):
        pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
        dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
        dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
        cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
        enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
        enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None

        return TFSeq2SeqLMOutput(
            logits=output.logits,
            past_key_values=pkv,
            decoder_hidden_states=dec_hs,
            decoder_attentions=dec_attns,
            cross_attentions=cross_attns,
            encoder_last_hidden_state=output.encoder_last_hidden_state,
            encoder_hidden_states=enc_hs,
            encoder_attentions=enc_attns,
        )

    # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs,
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        if decoder_attention_mask is not None:  # xla
            decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
        elif past_key_values is not None:  # no xla + past_key_values
            decoder_position_ids = past_key_values[0][0].shape[2]
        else:  # no xla + no past_key_values
            decoder_position_ids = tf.range(decoder_input_ids.shape[1])

        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "decoder_position_ids": decoder_position_ids,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "model", None) is not None:
            with tf.name_scope(self.model.name):
                self.model.build(None)
        if getattr(self, "bias_layer", None) is not None:
            with tf.name_scope(self.bias_layer.name):
                self.bias_layer.build(None)
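# Usage sketch (illustrative addition, not part of the upstream module). The
# checkpoint name "facebook/blenderbot_small-90M" and the generation settings
# below are assumptions chosen for the example, not values taken from this file.
#
#   from transformers import AutoTokenizer, TFBlenderbotSmallForConditionalGeneration
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   model = TFBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
#
#   inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
#   # generate() calls prepare_inputs_for_generation() above at every step, so with
#   # use_cache=True only the newest decoder token is re-embedded while the
#   # attention key/value states come from past_key_values.
#   reply_ids = model.generate(**inputs, max_new_tokens=40)
#   print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True))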
transformers/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot_small/modeling_tf_blenderbot_small.py", "repo_id": "transformers", "token_count": 31343 }
347
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Processor class for BLIP-2. """ from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import ( AddedToken, BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy, ) from ...utils import TensorType, logging logger = logging.get_logger(__name__) class Blip2Processor(ProcessorMixin): r""" Constructs a BLIP-2 processor which wraps a BLIP image processor and an OPT/T5 tokenizer into a single processor. [`BlipProcessor`] offers all the functionalities of [`BlipImageProcessor`] and [`AutoTokenizer`]. See the docstring of [`~BlipProcessor.__call__`] and [`~BlipProcessor.decode`] for more information. Args: image_processor (`BlipImageProcessor`): An instance of [`BlipImageProcessor`]. The image processor is a required input. tokenizer (`AutoTokenizer`): An instance of ['PreTrainedTokenizer`]. The tokenizer is a required input. num_query_tokens (`int`, *optional*): Number of tokens used by the Qformer as queries, should be same as in model's config. """ attributes = ["image_processor", "tokenizer"] valid_kwargs = ["num_query_tokens"] image_processor_class = "BlipImageProcessor" tokenizer_class = "AutoTokenizer" def __init__(self, image_processor, tokenizer, num_query_tokens=None, **kwargs): tokenizer.return_token_type_ids = False self.current_processor = image_processor self.image_token = AddedToken("<image>", normalized=False, special=True) tokenizer.add_tokens([self.image_token], special_tokens=True) self.num_query_tokens = num_query_tokens super().__init__(image_processor, tokenizer) def __call__( self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding: """ This method uses [`BlipImageProcessor.__call__`] method to prepare image(s) for the model, and [`BertTokenizerFast.__call__`] to prepare text for the model. Please refer to the docstring of the above two methods for more information. 
""" if images is None and text is None: raise ValueError("You have to specify either images or text.") # Get only text if images is None: self.current_processor = self.tokenizer text_encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, ) return text_encoding # add pixel_values encoding_image_processor = self.image_processor(images, return_tensors=return_tensors) if text is not None: if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): raise ValueError("Invalid input text. Please provide a string, or a list of strings") text_encoding = {} _text_encoding = self.tokenizer( text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=None, # hardcode "None" here for prepending image tokens **kwargs, ) # if we know how many query tokens, expand text inside processor. We need this hacky manipulation # because BLIP expects image tokens to be at the beginning even before BOS token if self.num_query_tokens is not None: image_tokens = self.image_token.content * self.num_query_tokens image_token_encoding = self.tokenizer([image_tokens], add_special_tokens=False, return_tensors=None) for k in _text_encoding: text_encoding[k] = [ img_encoding + txt_encoding for img_encoding, txt_encoding in zip(image_token_encoding[k], _text_encoding[k]) ] else: text_encoding = _text_encoding logger.warning_once( "Expanding inputs for image tokens in BLIP-2 should be done in processing. " "Please follow instruction here (https://gist.github.com/zucchini-nlp/e9f20b054fa322f84ac9311d9ab67042) to update your BLIP-2 model. " "Using processors without these attributes in the config is deprecated and will throw an error in v4.47." ) # cast to desired return tensors type text_encoding = BatchEncoding(text_encoding, tensor_type=return_tensors) else: text_encoding = None if text_encoding is not None: encoding_image_processor.update(text_encoding) return encoding_image_processor # Copied from transformers.models.blip.processing_blip.BlipProcessor.batch_decode with BertTokenizerFast->PreTrainedTokenizer def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) # Copied from transformers.models.blip.processing_blip.BlipProcessor.decode with BertTokenizerFast->PreTrainedTokenizer def decode(self, *args, **kwargs): """ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. 
""" return self.tokenizer.decode(*args, **kwargs) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names image_processor_input_names = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
transformers/src/transformers/models/blip_2/processing_blip_2.py/0
{ "file_path": "transformers/src/transformers/models/blip_2/processing_blip_2.py", "repo_id": "transformers", "token_count": 3713 }
348
# coding=utf-8 # Copyright 2024 Meta Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """chameleon model configuration""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) class ChameleonVQVAEConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ChameleonVQModel`]. It is used to instantiate a `ChameleonVQModel` according to the specified arguments, defining the model architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults will yield a similar configuration to the VQModel of the [meta/chameleon-7B](https://huggingface.co/meta/chameleon-7B). Args: embed_dim (`int`, *optional*, defaults to 256): Dimensionality of each embedding vector. num_embeddings (`int`, *optional*, defaults to 8192): Number of codebook embeddings. double_latent (`bool`, *optional*, defaults to `False`): Whether to use double z channels. latent_channels (`int`, *optional*, defaults to 256): Number of channels for the latent space. resolution (`int`, *optional*, defaults to 512): Resolution of the input images. in_channels (`int`, *optional*, defaults to 3): Number of input channels. base_channels (`int`, *optional*, defaults to 128): Base channel count. channel_multiplier (`List[int]`, *optional*, defaults to `[1, 1, 2, 2, 4]`): Channel multipliers for each resolution. num_res_blocks (`int`, *optional*, defaults to 2): Number of residual blocks. attn_resolutions (`List[int]`, *optional*): Resolutions to apply attention. dropout (`float`, *optional*, defaults to 0.0): Dropout rate. attn_type (`str`, *optional*, defaults to `"vanilla"`): Attention type used in VQ-GAN encoder. Can be "vanilla" or None. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
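    Example (illustrative addition; the value shown is simply this class's default):

    ```python
    >>> from transformers import ChameleonVQVAEConfig

    >>> # Build the default VQ-VAE configuration, as used by the chameleon-7B VQModel
    >>> vq_config = ChameleonVQVAEConfig()
    >>> vq_config.num_embeddings
    8192
    ```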
""" model_type = "chameleon_vqgan" def __init__( self, embed_dim: int = 256, num_embeddings: int = 8192, double_latent: bool = False, latent_channels: int = 256, resolution: int = 512, in_channels: int = 3, base_channels: int = 128, channel_multiplier: List[int] = [1, 1, 2, 2, 4], num_res_blocks: int = 2, attn_resolutions: List[int] = None, dropout: float = 0.0, attn_type: str = "vanilla", initializer_range=0.02, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_embeddings = num_embeddings self.double_latent = double_latent self.latent_channels = latent_channels self.resolution = resolution self.in_channels = in_channels self.base_channels = base_channels self.channel_multiplier = channel_multiplier self.num_res_blocks = num_res_blocks self.attn_resolutions = attn_resolutions self.dropout = dropout self.attn_type = attn_type self.initializer_range = initializer_range class ChameleonConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ChameleonModel`]. It is used to instantiate a chameleon model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the [meta/chameleon-7B](https://huggingface.co/meta/chameleon-7B). Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 65536): Vocabulary size of the chameleon model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ChameleonModel`]; this includes text and image tokens. hidden_size (`int`, *optional*, defaults to 4096): Dimension of the hidden representations. intermediate_size (`int`, *optional*, defaults to 11008): Dimension of the MLP representations. num_hidden_layers (`int`, *optional*, defaults to 32): Number of hidden layers in the Transformer decoder. num_attention_heads (`int`, *optional*, defaults to 32): Number of attention heads for each attention layer in the Transformer decoder. num_key_value_heads (`int`, *optional*, defaults to 32): This is the number of key_value heads that should be used to implement Grouped Query Attention. If `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. When converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed by meanpooling all the original heads within that group. For more details checkout [this paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `num_attention_heads`. hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): The non-linear activation function (function or string) in the decoder. max_position_embeddings (`int`, *optional*, defaults to 4096): The maximum sequence length that this model might ever be used with. Chameleon supports up to 4096 tokens. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. rms_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the rms normalization layers. use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). 
            Only relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum. See the following thread for more information on
            how these scaling strategies behave:
            https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an
            experimental feature, subject to breaking API changes in future versions.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        model_parallel_size (`int`, *optional*, defaults to 1):
            Number of shards used when training the model. This will be used in qk layernorm because the original
            Chameleon inference doesn't do reduction in those layers and each rank has its own biases.
        swin_norm (`bool`, *optional*, defaults to `False`):
            Use Swin Transformer normalization.
        vq_config (`dict`, *optional*):
            ChameleonVQVAEConfig instance containing the configuration for the VQ-VAE model.
        vocabulary_map (`dict`, *optional*):
            A dictionary containing the vocabulary map from the tokenizer. Used to obtain tokens from the image inputs.
        mlp_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
    ```python
    >>> from transformers import ChameleonModel, ChameleonConfig

    >>> # Initializing a chameleon chameleon-7b style configuration
    >>> configuration = ChameleonConfig()

    >>> # Initializing a model from the chameleon-7b style configuration
    >>> model = ChameleonModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "chameleon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65536,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=32,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-05,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        model_parallel_size=1,
        swin_norm=False,
        vq_config=None,
        vocabulary_map=None,
        mlp_bias=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_bias = mlp_bias
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout
        self.model_parallel_size = model_parallel_size
        self.swin_norm = swin_norm

        if vq_config is None:
            vq_config = {}
            logger.info("vq_config is None. initializing the ChameleonVQVAEConfig with default values.")

        self.vq_config = ChameleonVQVAEConfig(**vq_config)

        self.vocabulary_map = vocabulary_map

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
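# Usage sketch (illustrative addition, not part of the upstream module): the
# `rope_scaling` dictionary checked by `_rope_scaling_validation` above. The
# scaling factor is an arbitrary example value.
#
#   config = ChameleonConfig(rope_scaling={"type": "linear", "factor": 2.0})
#   # An incomplete dict fails validation at construction time:
#   #   ChameleonConfig(rope_scaling={"type": "linear"})  # ValueError: needs `factor` as well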
transformers/src/transformers/models/chameleon/configuration_chameleon.py/0
{ "file_path": "transformers/src/transformers/models/chameleon/configuration_chameleon.py", "repo_id": "transformers", "token_count": 5169 }
349
# coding=utf-8 # Copyright 2023 The LAION-AI Team and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch CLAP model.""" import collections import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.nn.functional as F from torch import nn from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int, ) from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "laion/clap-htsat-fused" # Adapted from: https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/utils.py#L191 def interpolate(hidden_states, ratio): """ Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. Args: hidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)): Input hidden states ratio (`int`): The ratio of the length of the output to the length of the input. """ (batch_size, time_length, classes_num) = hidden_states.shape upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1) upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num) return upsampled # Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L249 def window_partition(hidden_states, window_size): """ Returns the resized hidden states. The output shape should be `(batch_size * num_windows, window_size, window_size, num_channels)` Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`): Input hidden states window_size (`int`): Window size """ batch_size, height, width, num_channels = hidden_states.shape hidden_states = hidden_states.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows # Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263 def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. 
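    Together with `window_partition` above, this is an exact inverse: partitioning a tensor and then reversing the
    windows with the same `window_size`, `height` and `width` reconstructs the original tensor.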
Args: windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`): Input windows window_size (`int`): Window size height (`int`): Height of the resized audio width (`int`): Width of the resized audio """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html#CLIP-loss-function def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: labels = torch.arange(len(logits), device=logits.device) return nn.functional.cross_entropy(logits, labels) @dataclass # Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Clap class ClapTextModelOutput(ModelOutput): """ Base class for text model's outputs that also contains a pooling of the last hidden states. Args: text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`): The text embeddings obtained by applying the projection layer to the pooler_output. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ text_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass class ClapAudioModelOutput(ModelOutput): """ ClapAudio model output to mimic the output of the original implementation. Args: audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): The Audio embeddings obtained by applying the projection layer to the pooler_output. 
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ audio_embeds: Optional[torch.FloatTensor] = None last_hidden_state: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None attentions: Optional[Tuple[torch.FloatTensor, ...]] = None @dataclass # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Clap, vision->audio, Vision->Audio, image->audio class ClapOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for audio-text similarity. logits_per_audio (`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`): The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`): The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. audio_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`ClapTextModel`]. audio_model_output (`BaseModelOutputWithPooling`): The output of the [`ClapAudioModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_audio: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None audio_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None audio_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "audio_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Adapted from transformers.models.swin.modeling_swin.SwinDropPath class ClapDropPath(nn.Module): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is a slightly refactored version of the `SwinDropPath` implementation. 
""" def __init__(self, drop_prob=None): super().__init__() self.drop_prob = drop_prob def forward(self, hidden_states): if self.drop_prob == 0.0 or not self.training: return hidden_states keep_prob = 1 - self.drop_prob # work with diff dim tensors, not just 2D ConvNets shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1) random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device) random_tensor.floor_() # binarize output = hidden_states.div(keep_prob) * random_tensor return output # Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/feature_fusion.py#L133 class ClapAudioAFFBlock(nn.Module): r""" ATTENTIONAL FEATURE FUSION Block from CLAP, since in CLAP we are always in 2D mode, it is not needed to implement the 1D version. """ def __init__(self, config: ClapAudioConfig): super().__init__() channels = config.patch_embeds_hidden_size downsize_ratio = config.aff_block_r inter_channels = int(channels // downsize_ratio) self.local_att = nn.Sequential( nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(inter_channels), nn.ReLU(inplace=True), nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(channels), ) self.global_att = nn.Sequential( nn.AdaptiveAvgPool2d(1), nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(inter_channels), nn.ReLU(inplace=True), nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(channels), ) self.sigmoid = nn.Sigmoid() def forward(self, hidden_states, residual): attention_input = hidden_states + residual fused_layer_output = self.local_att(attention_input) + self.global_att(attention_input) fused_layer_output = self.sigmoid(fused_layer_output) output = 2 * hidden_states * fused_layer_output + 2 * residual * (1 - fused_layer_output) return output class ClapAudioPatchEmbed(nn.Module): """ This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the Transformer block. 
""" def __init__(self, config: ClapAudioConfig): super().__init__() img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size patch_size = ( (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size ) patch_stride = ( (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride ) self.img_size = img_size self.patch_stride = patch_stride self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] self.flatten = config.flatten_patch_embeds self.enable_fusion = config.enable_fusion padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2) scale_factor = 4 if (self.enable_fusion) and (config.fusion_type == "channel_map") else 1 self.proj = nn.Conv2d( config.patch_embed_input_channels * scale_factor, config.patch_embeds_hidden_size, kernel_size=patch_size, stride=patch_stride, padding=padding, ) self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity() if self.enable_fusion: self.fusion_model = ClapAudioAFFBlock(config) self.mel_conv2d = nn.Conv2d( config.patch_embed_input_channels, config.patch_embeds_hidden_size, kernel_size=(patch_size[0], patch_size[1] * 3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding, ) def forward(self, hidden_states, is_longer_idx=None): if self.enable_fusion: # retrieve the last mel as we have transposed the input global_hidden_states = hidden_states[:, 0:1, :, :] # global processing batch_size, num_channels, height, width = global_hidden_states.shape if height != self.img_size[0] or width != self.img_size[1]: raise ValueError( f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." ) global_hidden_states = self.proj(global_hidden_states) output_width = global_hidden_states.size(-1) if len(is_longer_idx) > 0: # local processing local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous() batch_size, num_channels, height, width = local_hidden_states.shape local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width) local_hidden_states = self.mel_conv2d(local_hidden_states) _, features, height, width = local_hidden_states.shape local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width) local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3) local_width = local_hidden_states.size(-1) local_hidden_states = torch.nn.functional.pad( local_hidden_states, (0, output_width - local_width), "constant", 0 ) global_hidden_states[is_longer_idx] = self.fusion_model( global_hidden_states[is_longer_idx], local_hidden_states ) hidden_states = global_hidden_states else: _, _, height, width = hidden_states.shape if height != self.img_size[0] or width != self.img_size[1]: raise ValueError( f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." 
) hidden_states = self.proj(hidden_states) if self.flatten: hidden_states = hidden_states.flatten(2).transpose(1, 2) hidden_states = self.norm(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->ClapAudio class ClapAudioSelfAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() if dim % num_heads != 0: raise ValueError( f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})" ) self.num_attention_heads = num_heads self.attention_head_size = int(dim / num_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.window_size = ( window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size) ) self.relative_position_bias_table = nn.Parameter( torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads) ) # get pair-wise relative position index for each token inside the window coords_h = torch.arange(self.window_size[0]) coords_w = torch.arange(self.window_size[1]) coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij")) coords_flatten = torch.flatten(coords, 1) relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] relative_coords = relative_coords.permute(1, 2, 0).contiguous() relative_coords[:, :, 0] += self.window_size[0] - 1 relative_coords[:, :, 1] += self.window_size[1] - 1 relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 relative_position_index = relative_coords.sum(-1) self.register_buffer("relative_position_index", relative_position_index) self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: batch_size, dim, num_channels = hidden_states.shape mixed_query_layer = self.query(hidden_states) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
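        # Illustrative note (added): with the default window_size of (8, 8), the scores
        # computed below have shape (num_windows * batch_size, num_heads, 64, 64), and the
        # learned relative_position_bias_table holds one scalar per head for each of the
        # (2*8 - 1) * (2*8 - 1) = 225 possible relative (dh, dw) offsets inside a window.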
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)] relative_position_bias = relative_position_bias.view( self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 ) relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() attention_scores = attention_scores + relative_position_bias.unsqueeze(0) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in ClapAudioModel forward() function) mask_shape = attention_mask.shape[0] attention_scores = attention_scores.view( batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim ) attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0) attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim) # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs # Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->ClapAudio class ClapAudioSelfOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, dim) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->ClapAudio class ClapAudioAttention(nn.Module): def __init__(self, config, dim, num_heads, window_size): super().__init__() self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size) self.output = ClapAudioSelfOutput(config, dim) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self(hidden_states, 
attention_mask, head_mask, output_attentions) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->ClapAudio class ClapAudioIntermediate(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(dim, int(config.mlp_ratio * dim)) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->ClapAudio class ClapAudioOutput(nn.Module): def __init__(self, config, dim): super().__init__() self.dense = nn.Linear(int(config.mlp_ratio * dim), dim) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states # Copied from transformers.models.swin.modeling_swin.SwinLayer with SwinDropPath->ClapDropPath, Swin->ClapAudio class ClapAudioLayer(nn.Module): def __init__(self, config, dim, input_resolution, num_heads, shift_size=0): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.shift_size = shift_size self.window_size = config.window_size self.input_resolution = input_resolution self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size) self.drop_path = ClapDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity() self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps) self.intermediate = ClapAudioIntermediate(config, dim) self.output = ClapAudioOutput(config, dim) def set_shift_and_window_size(self, input_resolution): if min(input_resolution) <= self.window_size: # if window size is larger than input resolution, we don't partition windows self.shift_size = torch_int(0) self.window_size = ( torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution) ) def get_attn_mask(self, height, width, dtype, device): if self.shift_size > 0: # calculate attention mask for SW-MSA img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device) height_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) width_slices = ( slice(0, -self.window_size), slice(-self.window_size, -self.shift_size), slice(-self.shift_size, None), ) count = 0 for height_slice in height_slices: for width_slice in width_slices: img_mask[:, height_slice, width_slice, :] = count count += 1 mask_windows = window_partition(img_mask, self.window_size) mask_windows = mask_windows.view(-1, self.window_size * self.window_size) attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) else: attn_mask = None return attn_mask def maybe_pad(self, hidden_states, height, width): pad_right = (self.window_size - width % self.window_size) % self.window_size pad_bottom = (self.window_size - height % self.window_size) % self.window_size pad_values = (0, 0, 0, pad_right, 
0, pad_bottom) hidden_states = nn.functional.pad(hidden_states, pad_values) return hidden_states, pad_values def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor, torch.Tensor]: if not always_partition: self.set_shift_and_window_size(input_dimensions) else: pass height, width = input_dimensions batch_size, _, channels = hidden_states.size() shortcut = hidden_states hidden_states = self.layernorm_before(hidden_states) hidden_states = hidden_states.view(batch_size, height, width, channels) # pad hidden_states to multiples of window size hidden_states, pad_values = self.maybe_pad(hidden_states, height, width) _, height_pad, width_pad, _ = hidden_states.shape # cyclic shift if self.shift_size > 0: shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) else: shifted_hidden_states = hidden_states # partition windows hidden_states_windows = window_partition(shifted_hidden_states, self.window_size) hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels) attn_mask = self.get_attn_mask( height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device ) attention_outputs = self.attention( hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions ) attention_output = attention_outputs[0] attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels) shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad) # reverse cyclic shift if self.shift_size > 0: attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) else: attention_windows = shifted_windows was_padded = pad_values[3] > 0 or pad_values[5] > 0 if was_padded: attention_windows = attention_windows[:, :height, :width, :].contiguous() attention_windows = attention_windows.view(batch_size, height * width, channels) hidden_states = shortcut + self.drop_path(attention_windows) layer_output = self.layernorm_after(hidden_states) layer_output = self.intermediate(layer_output) layer_output = hidden_states + self.output(layer_output) layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,) return layer_outputs # Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->ClapAudio class ClapAudioStage(nn.Module): def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample): super().__init__() self.config = config self.dim = dim self.blocks = nn.ModuleList( [ ClapAudioLayer( config=config, dim=dim, input_resolution=input_resolution, num_heads=num_heads, shift_size=0 if (i % 2 == 0) else config.window_size // 2, ) for i in range(depth) ] ) # patch merging layer if downsample is not None: self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm) else: self.downsample = None self.pointing = False def forward( self, hidden_states: torch.Tensor, input_dimensions: Tuple[int, int], head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, always_partition: Optional[bool] = False, ) -> Tuple[torch.Tensor]: height, width = input_dimensions for i, layer_module in enumerate(self.blocks): layer_head_mask = head_mask[i] if head_mask is not None else None layer_outputs = layer_module( hidden_states, 
input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = hidden_states if self.downsample is not None: height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2 output_dimensions = (height, width, height_downsampled, width_downsampled) hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions) else: output_dimensions = (height, width, height, width) stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions) if output_attentions: stage_outputs += layer_outputs[1:] return stage_outputs # Copied from transformers.models.swin.modeling_swin.SwinPatchMerging with Swin->ClapAudio class ClapAudioPatchMerging(nn.Module): """ Patch Merging Layer. Args: input_resolution (`Tuple[int]`): Resolution of input feature. dim (`int`): Number of input channels. norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`): Normalization layer class. """ def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None: super().__init__() self.input_resolution = input_resolution self.dim = dim self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) self.norm = norm_layer(4 * dim) def maybe_pad(self, input_feature, height, width): should_pad = (height % 2 == 1) or (width % 2 == 1) if should_pad: pad_values = (0, 0, 0, width % 2, 0, height % 2) input_feature = nn.functional.pad(input_feature, pad_values) return input_feature def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor: height, width = input_dimensions # `dim` is height * width batch_size, dim, num_channels = input_feature.shape input_feature = input_feature.view(batch_size, height, width, num_channels) # pad the input so that its height and width are divisible by 2, if needed input_feature = self.maybe_pad(input_feature, height, width) # [batch_size, height/2, width/2, num_channels] input_feature_0 = input_feature[:, 0::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_1 = input_feature[:, 1::2, 0::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_2 = input_feature[:, 0::2, 1::2, :] # [batch_size, height/2, width/2, num_channels] input_feature_3 = input_feature[:, 1::2, 1::2, :] # batch_size height/2 width/2 4*num_channels input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1) input_feature = input_feature.view(batch_size, -1, 4 * num_channels) # batch_size height/2*width/2 4*C input_feature = self.norm(input_feature) input_feature = self.reduction(input_feature) return input_feature class ClapAudioEncoder(nn.Module): def __init__(self, config): super().__init__() self.num_layers = len(config.depths) self.config = config self.patch_embed = ClapAudioPatchEmbed(config) self.enable_fusion = config.enable_fusion self.patch_stride = self.patch_embed.patch_stride self.spec_size = config.spec_size self.freq_ratio = config.spec_size // config.num_mel_bins self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1)) drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))] grid_size = self.patch_embed.grid_size self.input_resolutions = [(grid_size[0] // (2**i), grid_size[1] // (2**i)) for i in range(self.num_layers)] self.layers = nn.ModuleList( [ ClapAudioStage( config=config, dim=int(config.patch_embeds_hidden_size * 2**i_layer), input_resolution=self.input_resolutions[i_layer],
depth=config.depths[i_layer], num_heads=config.num_attention_heads[i_layer], drop_path=drop_path_rate[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])], downsample=ClapAudioPatchMerging if (i_layer < self.num_layers - 1) else None, ) for i_layer in range(self.num_layers) ] ) self.gradient_checkpointing = False self.batch_norm = nn.BatchNorm2d(config.num_mel_bins) self.norm = nn.LayerNorm(self.num_features) self.depths = config.depths self.avgpool = nn.AdaptiveAvgPool1d(1) def reshape_mel2img(self, normalized_input_features): """ The input is 4 normalized log mel spectrograms. It is reshaped to the common shape of images. Each channel should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`]. """ _, _, time_length, freq_length = normalized_input_features.shape spec_width = int(self.spec_size * self.freq_ratio) spec_height = self.spec_size // self.freq_ratio if time_length > spec_width or freq_length > spec_height: raise ValueError("the wav size should be less than or equal to the swin input size") # to avoid bicubic zero error if time_length < spec_width: normalized_input_features = nn.functional.interpolate( normalized_input_features, (spec_width, freq_length), mode="bicubic", align_corners=True ) if freq_length < spec_height: normalized_input_features = nn.functional.interpolate( normalized_input_features, (time_length, spec_height), mode="bicubic", align_corners=True ) batch, channels, time, freq = normalized_input_features.shape # batch_size, channels, spec_width, spec_height --> batch_size, channels, spec_height * freq_ratio, spec_width // freq_ratio normalized_input_features = normalized_input_features.reshape( batch, channels * self.freq_ratio, time // self.freq_ratio, freq ) normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous() normalized_input_features = normalized_input_features.reshape( batch, channels, freq * self.freq_ratio, time // self.freq_ratio ) return normalized_input_features def forward( self, input_features, is_longer: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, output_hidden_states_before_downsampling: Optional[bool] = False, always_partition: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple, ClapAudioModelOutput]: input_features = input_features.transpose(1, 3) normalized_input_features = self.batch_norm(input_features) normalized_input_features = normalized_input_features.transpose(1, 3) is_longer_list_idx = None if self.enable_fusion: is_longer_list = is_longer.to(input_features.device) is_longer_list_idx = torch.where(is_longer_list == 1)[0] hidden_states = self.reshape_mel2img(normalized_input_features) frames_num = hidden_states.shape[2] hidden_states = self.patch_embed(hidden_states, is_longer_list_idx) all_hidden_states = () if output_hidden_states else None all_reshaped_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None input_dimensions = self.input_resolutions[0] if output_hidden_states: batch_size, _, hidden_size = hidden_states.shape # rearrange batch_size (height width) channels -> batch_size channel height width reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states +=
(reshaped_hidden_state,) for i, layer_module in enumerate(self.layers): layer_head_mask = head_mask[i] if head_mask is not None else None input_dimensions = self.input_resolutions[i] if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions ) else: layer_outputs = layer_module( hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition ) hidden_states = layer_outputs[0] hidden_states_before_downsampling = layer_outputs[1] output_dimensions = layer_outputs[2] input_dimensions = (output_dimensions[-2], output_dimensions[-1]) if output_hidden_states and output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states_before_downsampling.shape # rearrange batch_size (height width) channels -> batch_size channel height width # here we use the original (not downsampled) height and width reshaped_hidden_state = hidden_states_before_downsampling.view( batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size ) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states_before_downsampling,) all_reshaped_hidden_states += (reshaped_hidden_state,) elif output_hidden_states and not output_hidden_states_before_downsampling: batch_size, _, hidden_size = hidden_states.shape # rearrange batch_size (height width) channels -> batch_size channel height width reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size) reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2) all_hidden_states += (hidden_states,) all_reshaped_hidden_states += (reshaped_hidden_state,) if output_attentions: all_self_attentions += layer_outputs[3:] last_hidden_state = self.norm(hidden_states) batch_size, _, n_channels = last_hidden_state.shape freq_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] temporal_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1] last_hidden_state = ( last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape) ) batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape # group 2D CNN c_freq_bin = n_frequencies // self.freq_ratio last_hidden_state = last_hidden_state.reshape( batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp ) last_hidden_state = ( last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1) ) latent_output = self.avgpool(torch.flatten(last_hidden_state, 2)) latent_output = torch.flatten(latent_output, 1) if not return_dict: return tuple( v for v in [ last_hidden_state, latent_output, all_reshaped_hidden_states, all_self_attentions, ] if v is not None ) return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=latent_output, hidden_states=all_reshaped_hidden_states, attentions=all_self_attentions, ) CLAP_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior.
Parameters: config ([`ClapConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CLAP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CLAP_AUDIO_INPUTS_DOCSTRING = r""" Args: input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details. is_longer (`torch.FloatTensor` of shape `(batch_size, 1)`, *optional*): Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance the features. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CLAP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class ClapProjectionLayer(nn.Module): def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]): super().__init__() self.config = config hidden_size = config.hidden_size projection_dim = config.projection_dim self.linear1 = nn.Linear(hidden_size, projection_dim) self.activation = ACT2FN[config.projection_hidden_act] self.linear2 = nn.Linear(projection_dim, projection_dim) def forward(self, hidden_states): hidden_states = self.linear1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.linear2(hidden_states) return hidden_states # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->ClapText, persistent=False->persistent=True class ClapTextEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. """ # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True ) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded.
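# For example, with padding_idx=1, input_ids [[5, 7, 1, 1]] yields position_ids [[2, 3, 1, 1]]: real tokens count
# up from padding_idx + 1 while padded positions keep padding_idx.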
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # When token_type_ids is not passed, fall back to the all-zeros buffer registered in the constructor. The registered # buffer helps users when tracing the model without passing token_type_ids, and solves issue #5664. if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ClapText class ClapTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
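# query_layer and key_layer have shape (batch_size, num_attention_heads, seq_length, attention_head_size), so the
# resulting scores have shape (batch_size, num_attention_heads, query_length, key_length).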
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask (precomputed for all layers in the ClapTextModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper.
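# Zeroing an attention probability here removes the corresponding value vector from the weighted sum computed below.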
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class ClapTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states CLAP_TEXT_SELF_ATTENTION_CLASSES = { "eager": ClapTextSelfAttention, } # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ClapText,BERT->CLAP_TEXT class ClapTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = CLAP_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = ClapTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class ClapTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = 
self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class ClapTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ClapText class ClapTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ClapTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = ClapTextAttention(config, position_embedding_type="absolute") self.intermediate = ClapTextIntermediate(config) self.output = ClapTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, 
self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ClapText class ClapTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ClapTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class ClapTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to 
the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ClapPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ClapConfig base_model_prefix = "clap" supports_gradient_checkpointing = False def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ClapTextEmbeddings): module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, ClapModel): nn.init.normal_(module.logit_scale_a, std=factor * 0.02) nn.init.normal_(module.logit_scale_t, std=factor * 0.02) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, (nn.Conv2d, nn.Linear)): in_proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5) * factor nn.init.normal_(module.weight, std=in_proj_std) if module.bias is not None: module.bias.data.zero_() class ClapAudioModel(ClapPreTrainedModel): config_class = ClapAudioConfig main_input_name = "input_features" def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_encoder = ClapAudioEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_encoder.patch_embed.proj @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ClapAudioConfig) def forward( self, input_features: Optional[torch.FloatTensor] = None, is_longer: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapAudioModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused") >>> inputs = processor(audios=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return self.audio_encoder( input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class ClapTextModel(ClapPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob 
Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and `add_cross_attention` set to `True`; `encoder_hidden_states` is then expected as an input to the forward pass. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762 """ config_class = ClapTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = ClapTextEmbeddings(config) self.encoder = ClapTextEncoder(config) self.pooler = ClapTextPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`).
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: 
return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings(CLAP_START_DOCSTRING) class ClapModel(ClapPreTrainedModel): config_class = ClapConfig def __init__(self, config: ClapConfig): super().__init__(config) if not isinstance(config.text_config, ClapTextConfig): raise TypeError( "config.text_config is expected to be of type ClapTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.audio_config, ClapAudioConfig): raise TypeError( "config.audio_config is expected to be of type ClapAudioConfig but is of type" f" {type(config.audio_config)}." ) text_config = config.text_config audio_config = config.audio_config self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value))) self.projection_dim = config.projection_dim self.text_model = ClapTextModel(text_config) self.text_projection = ClapProjectionLayer(text_config) self.audio_model = ClapAudioModel(audio_config) self.audio_projection = ClapProjectionLayer(audio_config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`]. Examples: ```python >>> from transformers import AutoTokenizer, ClapModel >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
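# i.e. `config.output_attentions`, `config.output_hidden_states` and `config.use_return_dict` act as defaults when the
# corresponding arguments are None.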
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output text_features = self.text_projection(pooled_output) text_features = F.normalize(text_features, dim=-1) return text_features @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) def get_audio_features( self, input_features: Optional[torch.Tensor] = None, is_longer: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`]. Examples: ```python >>> from transformers import AutoFeatureExtractor, ClapModel >>> import torch >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused") >>> random_audio = torch.rand((16_000)) >>> inputs = feature_extractor(random_audio, return_tensors="pt") >>> audio_features = model.get_audio_features(**inputs) ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict audio_outputs = self.audio_model( input_features=input_features, is_longer=is_longer, return_dict=return_dict, ) pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_features = self.audio_projection(pooled_output) audio_features = F.normalize(audio_features, dim=-1) return audio_features @add_start_docstrings_to_model_forward(CLAP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClapOutput, config_class=ClapConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, input_features: Optional[torch.FloatTensor] = None, is_longer: Optional[torch.BoolTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ClapOutput]: r""" Returns: Examples: ```python >>> from datasets import load_dataset >>> from transformers import AutoProcessor, ClapModel >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused") >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused") >>> input_text = ["Sound of a dog", "Sound of vacuum cleaner"] >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True) >>> outputs =
model(**inputs) >>> logits_per_audio = outputs.logits_per_audio # this is the audio-text similarity score >>> probs = logits_per_audio.softmax(dim=-1) # we can take the softmax to get the label probabilities ```""" # Use CLAP model's config for some fields (if specified) instead of those of audio & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict audio_outputs = self.audio_model( input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(audio_embeds) text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output text_embeds = self.text_projection(text_embeds) # normalized features audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale_text = self.logit_scale_t.exp() logit_scale_audio = self.logit_scale_a.exp() logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text logits_per_audio = torch.matmul(audio_embeds, text_embeds.t()) * logit_scale_audio loss = None if return_loss: caption_loss = contrastive_loss(logits_per_text) audio_loss = contrastive_loss(logits_per_audio.t()) loss = (caption_loss + audio_loss) / 2.0 if not return_dict: output = (logits_per_audio, logits_per_text, text_embeds, audio_embeds, text_outputs, audio_outputs) return ((loss,) + output) if loss is not None else output return ClapOutput( loss=loss, logits_per_audio=logits_per_audio, logits_per_text=logits_per_text, text_embeds=text_embeds, audio_embeds=audio_embeds, text_model_output=text_outputs, audio_model_output=audio_outputs, ) @add_start_docstrings( """ CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output). 
""", CLAP_START_DOCSTRING, ) class ClapTextModelWithProjection(ClapPreTrainedModel): config_class = ClapTextConfig def __init__(self, config: ClapTextConfig): super().__init__(config) self.text_model = ClapTextModel(config) self.text_projection = ClapProjectionLayer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.word_embeddings def set_input_embeddings(self, value): self.text_model.embeddings.word_embeddings = value @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClapTextModelOutput, config_class=ClapTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ClapTextModelOutput]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, ClapTextModelWithProjection >>> model = ClapTextModelWithProjection.from_pretrained("laion/clap-htsat-unfused") >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused") >>> inputs = tokenizer(["a sound of a cat", "a sound of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> text_embeds = outputs.text_embeds ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output text_embeds = self.text_projection(pooled_output) if not return_dict: outputs = (text_embeds, text_outputs[0]) + text_outputs[2:] return tuple(output for output in outputs if output is not None) return ClapTextModelOutput( text_embeds=text_embeds, last_hidden_state=text_outputs.last_hidden_state, hidden_states=text_outputs.hidden_states, attentions=text_outputs.attentions, ) @add_start_docstrings( """ CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output). 
""", CLAP_START_DOCSTRING, ) class ClapAudioModelWithProjection(ClapPreTrainedModel): config_class = ClapAudioConfig main_input_name = "input_features" def __init__(self, config: ClapAudioConfig): super().__init__(config) self.audio_model = ClapAudioModel(config) self.audio_projection = ClapProjectionLayer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.audio_model.audio_encoder.patch_embed.proj @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ClapAudioModelOutput, config_class=ClapAudioConfig) def forward( self, input_features: Optional[torch.FloatTensor] = None, is_longer: Optional[torch.BoolTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ClapAudioModelOutput]: r""" Returns: Examples: ```python >>> from datasets import load_dataset >>> from transformers import ClapAudioModelWithProjection, ClapProcessor >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused") >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused") >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example") >>> audio_sample = dataset["train"]["audio"][0]["array"] >>> inputs = processor(audios=audio_sample, return_tensors="pt") >>> outputs = model(**inputs) >>> audio_embeds = outputs.audio_embeds ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) audio_outputs = self.audio_model( input_features=input_features, is_longer=is_longer, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output audio_embeds = self.audio_projection(pooled_output) if not return_dict: outputs = (audio_embeds, audio_outputs[0]) + audio_outputs[2:] return tuple(output for output in outputs if output is not None) return ClapAudioModelOutput( audio_embeds=audio_embeds, last_hidden_state=audio_outputs.last_hidden_state, attentions=audio_outputs.attentions, hidden_states=audio_outputs.hidden_states, )
transformers/src/transformers/models/clap/modeling_clap.py/0
{ "file_path": "transformers/src/transformers/models/clap/modeling_clap.py", "repo_id": "transformers", "token_count": 44333 }
350
# coding=utf-8 # Copyright 2022 The OpenAI Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch CLIPSeg model.""" import copy import math from dataclasses import dataclass from typing import Any, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling from ...modeling_utils import PreTrainedModel from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_clipseg import CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "CIDAS/clipseg-rd64-refined" # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) # Copied from transformers.models.clip.modeling_clip.clip_loss with clip->clipseg def clipseg_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->CLIPSeg class CLIPSegOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPSegVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`CLIPSegTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`CLIPSegVisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) @dataclass class CLIPSegDecoderOutput(ModelOutput): """ Args: logits (`torch.FloatTensor` of shape `(batch_size, height, width)`): Classification scores for each pixel. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class CLIPSegImageSegmentationOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. ... vision_model_output (`BaseModelOutputWithPooling`): The output of the [`CLIPSegVisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None conditional_embeddings: torch.FloatTensor = None pooled_output: torch.FloatTensor = None vision_model_output: BaseModelOutputWithPooling = None decoder_output: CLIPSegDecoderOutput = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["vision_model_output", "decoder_output"] else getattr(self, k).to_tuple() for k in self.keys() ) class CLIPSegVisionEmbeddings(nn.Module): # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings.__init__ with CLIP->CLIPSeg def __init__(self, config: CLIPSegVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_position_embeddings(self, new_size): if len(new_size) != 2: raise ValueError("new_size should consist of 2 values") num_patches_one_direction = int(self.num_patches**0.5) # we interpolate the position embeddings in 2D a = self.position_embedding.weight[1:].T.view( 1, self.config.hidden_size, num_patches_one_direction, num_patches_one_direction ) b = ( nn.functional.interpolate(a, new_size, mode="bicubic", align_corners=False) .squeeze(0) .view(self.config.hidden_size, new_size[0] * new_size[1]) .T ) result = torch.cat([self.position_embedding.weight[:1], b]) return result def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if embeddings.shape[1] != self.num_positions: new_shape = int(math.sqrt(embeddings.shape[1] - 1)) embeddings = embeddings + self.interpolate_position_embeddings((new_shape, new_shape)) embeddings = embeddings.to(embeddings.dtype) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->CLIPSeg class CLIPSegTextEmbeddings(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() embed_dim = config.hidden_size self.token_embedding = nn.Embedding(config.vocab_size, embed_dim) self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ) -> torch.Tensor: seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if inputs_embeds is None: inputs_embeds = 
self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->CLIPSeg
class CLIPSegAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        causal_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """Input shape: Batch x Time x Channel"""

        bsz, tgt_len, embed_dim = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states) * self.scale
        key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
        value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        # apply the causal_attention_mask first
        if causal_attention_mask is not None:
            if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
                    f" {causal_attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)
        attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->CLIPSeg
class CLIPSegMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer with AltCLIP->CLIPSeg
class CLIPSegEncoderLayer(nn.Module):
    def __init__(self, config: CLIPSegConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = CLIPSegAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = CLIPSegMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        causal_attention_mask: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.FloatTensor]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
                `(config.encoder_attention_heads,)`.
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
        """
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs


class CLIPSegPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
""" config_class = CLIPSegConfig base_model_prefix = "clip" supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, CLIPSegTextEmbeddings): module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02) elif isinstance(module, CLIPSegVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, CLIPSegAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, CLIPSegMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, CLIPSegModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() CLIPSEG_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`CLIPSegConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CLIPSEG_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. 
[What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CLIPSEG_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CLIPSEG_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->CLIPSeg class CLIPSegEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`CLIPSegEncoderLayer`]. 
Args: config: CLIPSegConfig """ def __init__(self, config: CLIPSegConfig): super().__init__() self.config = config self.layers = nn.ModuleList([CLIPSegEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class CLIPSegTextTransformer(nn.Module): def __init__(self, config: CLIPSegTextConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPSegTextEmbeddings(config) self.encoder = CLIPSegEncoder(config) self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) # For `pooled_output` computation self.eos_token_id = config.eos_token_id @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig) # Adapted from transformers.models.clip.modeling_clip.CLIPTextTransformer.forward with clip->clipseg, CLIP->CLIPSeg def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is None: raise ValueError("You have to specify input_ids") input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids) # CLIPSeg's text model uses causal mask, prepare it here. 
        # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
        causal_attention_mask = _create_4d_causal_attention_mask(
            input_shape, hidden_states.dtype, device=hidden_states.device
        )
        # expand attention_mask
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            causal_attention_mask=causal_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        if self.eos_token_id == 2:
            # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
            # A CLIPSeg model with such `eos_token_id` in the config can't work correctly with extra new tokens added
            # ------------------------------------------------------------
            # text_embeds.shape = [batch_size, sequence_length, transformer.width]
            # take features from the eot embedding (eot_token is the highest number in each sequence)
            # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
            ]
        else:
            # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
            pooled_output = last_hidden_state[
                torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
                # We need to get the first position of `eos_token_id` value (`pad_token_id` might be equal to `eos_token_id`)
                # Note: we assume each sequence (along batch dim.) contains an `eos_token_id` (e.g.
prepared by the tokenizer) (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id) .int() .argmax(dim=-1), ] if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class CLIPSegTextModel(CLIPSegPreTrainedModel): config_class = CLIPSegTextConfig _no_split_modules = ["CLIPSegTextEmbeddings", "CLIPSegEncoderLayer"] def __init__(self, config: CLIPSegTextConfig): super().__init__(config) self.text_model = CLIPSegTextTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.text_model.embeddings.token_embedding def set_input_embeddings(self, value): self.text_model.embeddings.token_embedding = value @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, CLIPSegTextModel >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegTextModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled (EOS token) states ```""" return self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) class CLIPSegVisionTransformer(nn.Module): # Copied from transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer.__init__ with AltCLIP->CLIPSeg def __init__(self, config: CLIPSegVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = CLIPSegVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = CLIPSegEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig) # Copied from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") 
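        # High-level flow of this ViT-style forward pass: `pixel_values` are cut into patch
        # embeddings with a prepended class token, layer-normalized (`pre_layrnorm`), run through
        # the Transformer encoder, and the final class-token position is layer-normalized again
        # (`post_layernorm`) to form the pooled output consumed by the projection heads.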
hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class CLIPSegVisionModel(CLIPSegPreTrainedModel): config_class = CLIPSegVisionConfig main_input_name = "pixel_values" def __init__(self, config: CLIPSegVisionConfig): super().__init__(config) self.vision_model = CLIPSegVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPSegVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPSegVisionModel >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegVisionModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(CLIPSEG_START_DOCSTRING) class CLIPSegModel(CLIPSegPreTrainedModel): config_class = CLIPSegConfig def __init__(self, config: CLIPSegConfig): super().__init__(config) if not isinstance(config.text_config, CLIPSegTextConfig): raise TypeError( "config.text_config is expected to be of type CLIPSegTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPSegVisionConfig): raise TypeError( "config.vision_config is expected to be of type CLIPSegVisionConfig but is of type" f" {type(config.vision_config)}." 
        )

        text_config = config.text_config
        vision_config = config.vision_config

        self.projection_dim = config.projection_dim
        self.text_embed_dim = text_config.hidden_size
        self.vision_embed_dim = vision_config.hidden_size

        self.text_model = CLIPSegTextTransformer(text_config)
        self.vision_model = CLIPSegVisionTransformer(vision_config)

        self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
        self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
        self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CLIPSEG_TEXT_INPUTS_DOCSTRING)
    def get_text_features(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The text embeddings obtained by applying the projection layer to the pooled output of
                [`CLIPSegTextModel`].

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, CLIPSegModel

        >>> tokenizer = AutoTokenizer.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
        >>> text_features = model.get_text_features(**inputs)
        ```"""
        # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        text_outputs = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = text_outputs[1]
        text_features = self.text_projection(pooled_output)

        return text_features

    @add_start_docstrings_to_model_forward(CLIPSEG_VISION_INPUTS_DOCSTRING)
    def get_image_features(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> torch.FloatTensor:
        r"""
        Returns:
            image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
                The image embeddings obtained by applying the projection layer to the pooled output of
                [`CLIPSegVisionModel`].

        Examples:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor, CLIPSegModel

        >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> image_features = model.get_image_features(**inputs)
        ```"""
        # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components.
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CLIPSegOutput, config_class=CLIPSegConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CLIPSegOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, CLIPSegModel >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined") >>> model = CLIPSegModel.from_pretrained("CIDAS/clipseg-rd64-refined") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CLIPSEG model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = clipseg_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return CLIPSegOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) class CLIPSegDecoderLayer(nn.Module): """ CLIPSeg decoder layer, which is identical to `CLIPSegEncoderLayer`, except that normalization is applied after self-attention/MLP, rather than before. """ # Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoderLayer.__init__ with AltCLIP->CLIPSeg def __init__(self, config: CLIPSegConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = CLIPSegAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = CLIPSegMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states hidden_states = self.layer_norm1(hidden_states) residual = hidden_states hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states hidden_states = self.layer_norm2(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class CLIPSegDecoder(CLIPSegPreTrainedModel): def __init__(self, config: CLIPSegConfig): super().__init__(config) self.conditional_layer = config.conditional_layer self.film_mul = nn.Linear(config.projection_dim, config.reduce_dim) self.film_add = nn.Linear(config.projection_dim, config.reduce_dim) if config.use_complex_transposed_convolution: transposed_kernels = (config.vision_config.patch_size // 4, config.vision_config.patch_size // 4) self.transposed_convolution = nn.Sequential( nn.Conv2d(config.reduce_dim, config.reduce_dim, kernel_size=3, padding=1), nn.ReLU(), nn.ConvTranspose2d( config.reduce_dim, config.reduce_dim // 2, kernel_size=transposed_kernels[0], stride=transposed_kernels[0], ), nn.ReLU(), nn.ConvTranspose2d( config.reduce_dim // 2, 1, kernel_size=transposed_kernels[1], stride=transposed_kernels[1] ), ) else: self.transposed_convolution = nn.ConvTranspose2d( config.reduce_dim, 1, config.vision_config.patch_size, stride=config.vision_config.patch_size ) depth = len(config.extract_layers) self.reduces = nn.ModuleList( [nn.Linear(config.vision_config.hidden_size, config.reduce_dim) for _ in range(depth)] ) decoder_config = copy.deepcopy(config.vision_config) decoder_config.hidden_size = config.reduce_dim decoder_config.num_attention_heads = config.decoder_num_attention_heads decoder_config.intermediate_size = config.decoder_intermediate_size decoder_config.hidden_act = "relu" self.layers = nn.ModuleList([CLIPSegDecoderLayer(decoder_config) for _ in range(len(config.extract_layers))]) def forward( self, hidden_states: Tuple[torch.Tensor], conditional_embeddings: torch.Tensor, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = True, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None activations = hidden_states[::-1] output = None for i, (activation, layer, reduce) in enumerate(zip(activations, self.layers, self.reduces)): if output is not None: output = reduce(activation) + output else: output = reduce(activation) if i == self.conditional_layer: output = self.film_mul(conditional_embeddings) * output.permute(1, 0, 2) + self.film_add( conditional_embeddings ) output = output.permute(1, 0, 2) layer_outputs = layer( output, attention_mask=None, causal_attention_mask=None, output_attentions=output_attentions ) output = layer_outputs[0] if output_hidden_states: all_hidden_states += (output,) if output_attentions: all_attentions += (layer_outputs[1],) output = output[:, 1:, :].permute(0, 2, 1) # remove cls token and reshape to [batch_size, reduce_dim, seq_len] size = int(math.sqrt(output.shape[2])) batch_size = conditional_embeddings.shape[0] output = output.view(batch_size, output.shape[1], size, size) logits = self.transposed_convolution(output).squeeze(1) if not return_dict: return tuple(v for v in [logits, all_hidden_states, all_attentions] if v is not None) return CLIPSegDecoderOutput( logits=logits, 
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )


@add_start_docstrings(
    """
    CLIPSeg model with a Transformer-based decoder on top for zero-shot and one-shot image segmentation.
    """,
    CLIPSEG_START_DOCSTRING,
)
class CLIPSegForImageSegmentation(CLIPSegPreTrainedModel):
    config_class = CLIPSegConfig

    def __init__(self, config: CLIPSegConfig):
        super().__init__(config)

        self.config = config

        self.clip = CLIPSegModel(config)
        self.extract_layers = config.extract_layers

        self.decoder = CLIPSegDecoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_conditional_embeddings(
        self,
        batch_size: Optional[int] = None,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        conditional_pixel_values: Optional[torch.Tensor] = None,
    ):
        if input_ids is not None:
            # compute conditional embeddings from texts
            if len(input_ids) != batch_size:
                raise ValueError("Make sure to pass as many prompt texts as there are query images")
            with torch.no_grad():
                conditional_embeddings = self.clip.get_text_features(
                    input_ids, attention_mask=attention_mask, position_ids=position_ids
                )
        elif conditional_pixel_values is not None:
            # compute conditional embeddings from images
            if len(conditional_pixel_values) != batch_size:
                raise ValueError("Make sure to pass as many prompt images as there are query images")
            with torch.no_grad():
                conditional_embeddings = self.clip.get_image_features(conditional_pixel_values)
        else:
            raise ValueError(
                "Invalid conditional, should be either provided as `input_ids` or `conditional_pixel_values`"
            )

        return conditional_embeddings

    @add_start_docstrings_to_model_forward(CLIPSEG_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CLIPSegImageSegmentationOutput, config_class=CLIPSegTextConfig)
    def forward(
        self,
        input_ids: Optional[torch.FloatTensor] = None,
        pixel_values: Optional[torch.FloatTensor] = None,
        conditional_pixel_values: Optional[torch.FloatTensor] = None,
        conditional_embeddings: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, CLIPSegImageSegmentationOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground-truth binary segmentation maps with the same spatial size as the predicted logits, used to
            compute the per-pixel binary cross-entropy loss (`BCEWithLogitsLoss`).
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoProcessor, CLIPSegForImageSegmentation
        >>> from PIL import Image
        >>> import requests

        >>> processor = AutoProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
        >>> model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> texts = ["a cat", "a remote", "a blanket"]
        >>> inputs = processor(text=texts, images=[image] * len(texts), padding=True, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> logits = outputs.logits
        >>> print(logits.shape)
        torch.Size([3, 352, 352])
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # step 1: forward the query images through the frozen CLIP vision encoder
        with torch.no_grad():
            vision_outputs = self.clip.vision_model(
                pixel_values=pixel_values,
                output_attentions=output_attentions,
                output_hidden_states=True,  # we need the intermediate hidden states
                return_dict=return_dict,
            )
            pooled_output = self.clip.visual_projection(vision_outputs[1])

            hidden_states = vision_outputs.hidden_states if return_dict else vision_outputs[2]
            # we add +1 here as the hidden states also include the initial embeddings
            activations = [hidden_states[i + 1] for i in self.extract_layers]

            # update vision_outputs
            if return_dict:
                vision_outputs = BaseModelOutputWithPooling(
                    last_hidden_state=vision_outputs.last_hidden_state,
                    pooler_output=vision_outputs.pooler_output,
                    hidden_states=vision_outputs.hidden_states if output_hidden_states else None,
                    attentions=vision_outputs.attentions,
                )
            else:
                vision_outputs = (
                    vision_outputs[:2] + vision_outputs[3:] if not output_hidden_states else vision_outputs
                )

        # step 2: compute conditional embeddings, either from text, images or a user-provided embedding
        if conditional_embeddings is None:
            conditional_embeddings = self.get_conditional_embeddings(
                batch_size=pixel_values.shape[0],
                input_ids=input_ids,
                attention_mask=attention_mask,
                position_ids=position_ids,
                conditional_pixel_values=conditional_pixel_values,
            )
        else:
            if conditional_embeddings.shape[0] != pixel_values.shape[0]:
                raise ValueError(
                    "Make sure to pass as many conditional embeddings as there are query images in the batch"
                )
            if conditional_embeddings.shape[1] != self.config.projection_dim:
                raise ValueError(
                    "Make sure that the feature dimension of the conditional embeddings matches"
                    " `config.projection_dim`."
                )

        # step 3: forward the activations and conditional embeddings through the lightweight decoder to predict masks
        decoder_outputs = self.decoder(
            activations,
            conditional_embeddings,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        logits = decoder_outputs.logits if return_dict else decoder_outputs[0]

        loss = None
        if labels is not None:
            # move labels to the correct device to enable PP
            labels = labels.to(logits.device)
            loss_fn = nn.BCEWithLogitsLoss()
            loss = loss_fn(logits, labels)

        if not return_dict:
            output = (logits, conditional_embeddings, pooled_output, vision_outputs, decoder_outputs)
            return ((loss,) + output) if loss is not None else output

        return CLIPSegImageSegmentationOutput(
            loss=loss,
            logits=logits,
            conditional_embeddings=conditional_embeddings,
            pooled_output=pooled_output,
            vision_model_output=vision_outputs,
            decoder_output=decoder_outputs,
        )
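A short post-processing sketch to complement the docstring example above. Since training uses `BCEWithLogitsLoss`, the per-pixel logits become probabilities through a sigmoid; the 0.5 threshold and the bilinear resize back to the input image size are illustrative assumptions, not values prescribed by the checkpoint:

```python
import torch
import torch.nn.functional as F

# `outputs` and `image` are taken from the docstring example above.
probs = torch.sigmoid(outputs.logits)  # (num_prompts, 352, 352), values in [0, 1]

# Optionally resize back to the original image resolution (PIL `image.size` is (width, height)).
probs = F.interpolate(probs.unsqueeze(1), size=image.size[::-1], mode="bilinear", align_corners=False)

# Binarize with an assumed 0.5 threshold: one mask per text prompt.
masks = (probs.squeeze(1) > 0.5).to(torch.uint8)
```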
transformers/src/transformers/models/clipseg/modeling_clipseg.py/0
{ "file_path": "transformers/src/transformers/models/clipseg/modeling_clipseg.py", "repo_id": "transformers", "token_count": 27548 }
351
# coding=utf-8
# Copyright 2022 The Salesforce authors, The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for CodeGen"""

import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np
import regex as re

from ...utils import is_tf_available, is_torch_available, logging, to_py_obj


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from ...tokenization_utils import AddedToken, PreTrainedTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}


@lru_cache()
def bytes_to_unicode():
    """
    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to
    whitespace/control characters that the bpe code barfs on.

    The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your
    vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K
    for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want
    lookup tables between utf-8 bytes and unicode strings.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """
    Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class CodeGenTokenizer(PreTrainedTokenizer):
    """
    Construct a CodeGen tokenizer. Based on byte-level Byte-Pair-Encoding.

    This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
    be encoded differently whether it is at the beginning of the sentence (without space) or not:

    ```python
    >>> from transformers import CodeGenTokenizer

    >>> tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
    >>> tokenizer("Hello world")["input_ids"]
    [15496, 995]

    >>> tokenizer(" Hello world")["input_ids"]
    [18435, 995]
    ```

    You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
    call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.

    <Tip>

    When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).

    </Tip>

    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
    to this superclass for more information regarding those methods.

    Args:
        vocab_file (`str`):
            Path to the vocabulary file.
        merges_file (`str`):
            Path to the merges file.
        errors (`str`, *optional*, defaults to `"replace"`):
            Paradigm to follow when decoding bytes to UTF-8. See
            [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
        unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be
            this token instead.
        bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The beginning of sequence token.
        eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
            The end of sequence token.
        pad_token (`str`, *optional*):
            The token used for padding, for example when batching sequences of different lengths.
        add_prefix_space (`bool`, *optional*, defaults to `False`):
            Whether or not to add an initial space to the input. This allows the leading word to be treated like any
            other word. (The CodeGen tokenizer detects the beginning of words by the preceding space.)
        add_bos_token (`bool`, *optional*, defaults to `False`):
            Whether to add a beginning of sequence token at the start of sequences.
        return_token_type_ids (`bool`, *optional*, defaults to `False`):
            Whether to return token type IDs.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        pad_token=None,
        add_prefix_space=False,
        add_bos_token=False,
        return_token_type_ids=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
        self.add_bos_token = add_bos_token
        self.return_token_type_ids = return_token_type_ids
        if self.return_token_type_ids:
            self.model_input_names.append("token_type_ids")

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
        super().__init__(
            errors=errors,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            add_bos_token=add_bos_token,
            return_token_type_ids=return_token_type_ids,
            **kwargs,
        )

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if self.add_bos_token:
            bos_token_ids = [self.bos_token_id]
        else:
            bos_token_ids = []

        output = bos_token_ids + token_ids_0

        if token_ids_1 is None:
            return output

        return output + bos_token_ids + token_ids_1

    def _tokenize(self, text):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """
        Create a mask from the two sequences passed to be used in a sequence-pair classification task. A sequence
        pair mask has the following format:

        ```
        0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
        | first sequence    | second sequence |
        ```

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.

        Returns:
            `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
        """
        sep = [self.sep_token_id] if self.sep_token_id is not None else []
        cls = [self.cls_token_id] if self.cls_token_id is not None else []

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if is_split_into_words or add_prefix_space:
            text = " " + text
        return (text, kwargs)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """
        Converts a sequence of ids into a string, using the tokenizer and vocabulary with options to remove special
        tokens and clean up tokenization spaces.

        Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.

        Args:
            token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):
                List of tokenized input ids. Can be obtained using the `__call__` method.
            skip_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not to remove special tokens in the decoding.
            clean_up_tokenization_spaces (`bool`, *optional*):
                Whether or not to clean up the tokenization spaces. If `None`, will default to
                `self.clean_up_tokenization_spaces` (available in the `tokenizer_config`).
            truncate_before_pattern (`List[str]`, *optional*, defaults to `None`):
                A list of regular expression strings that will be used to truncate the returned string. This can be
                used to remove extra pieces of code (e.g. truncate if observing a comment symbol "#" at the beginning
                of a new line). An example pattern could be `["^#", re.escape("<|endoftext|>"), "^'''", "\n\n\n"]`.
            kwargs (additional keyword arguments, *optional*):
                Will be passed to the underlying model specific decode method.

        Returns:
            `str`: The decoded sentence.
        """
        token_ids = to_py_obj(token_ids)

        decoded_text = super()._decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
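Given how `truncate` works (it cuts at the second top-level `print` or `def` and at the earliest match of any `truncate_before_pattern` regex), here is a minimal decoding sketch using the checkpoint named in the class docstring; the completion string is a made-up stand-in for model output:

```python
from transformers import CodeGenTokenizer

tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

# Made-up completion: a function followed by example usage we want to strip.
completion = "def add(a, b):\n    return a + b\n\n# example usage\nprint(add(1, 2))\n"
ids = tokenizer(completion)["input_ids"]

# "^#" (compiled with re.MULTILINE) first matches the comment line, so everything
# from there on is dropped and only the function definition survives.
print(tokenizer.decode(ids, truncate_before_pattern=["^#"]))
# def add(a, b):
#     return a + b
```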
transformers/src/transformers/models/codegen/tokenization_codegen.py/0
{ "file_path": "transformers/src/transformers/models/codegen/tokenization_codegen.py", "repo_id": "transformers", "token_count": 7179 }
352
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TF 2.0 ConvBERT model.""" from __future__ import annotations from typing import Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMaskedLMOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFMaskedLanguageModelingLoss, TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFTokenClassificationLoss, get_initializer, keras, keras_serializable, unpack_inputs, ) from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax from ...utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_convbert import ConvBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base" _CONFIG_FOR_DOC = "ConvBertConfig" # Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert class TFConvBertEmbeddings(keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config: ConvBertConfig, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape=None): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.embedding_size], initializer=get_initializer(self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.embedding_size], initializer=get_initializer(self.initializer_range), ) if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, past_key_values_length=0, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. 
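        The word, position and token type embeddings are summed, then layer-normalized and passed through dropout
        before being returned.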
        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `inputs_embeds`.")

        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        if position_ids is None:
            position_ids = tf.expand_dims(
                tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
            )

        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
        token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
        final_embeddings = inputs_embeds + position_embeds + token_type_embeds
        final_embeddings = self.LayerNorm(inputs=final_embeddings)
        final_embeddings = self.dropout(inputs=final_embeddings, training=training)

        return final_embeddings


class TFConvBertSelfAttention(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
        if new_num_attention_heads < 1:
            self.head_ratio = config.num_attention_heads
            num_attention_heads = 1
        else:
            num_attention_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio

        self.num_attention_heads = num_attention_heads
        self.conv_kernel_size = config.conv_kernel_size

        if config.hidden_size % self.num_attention_heads != 0:
            raise ValueError("hidden_size should be divisible by num_attention_heads")

        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
        )
        self.key = keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
        )
        self.value = keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
        )
        self.key_conv_attn_layer = keras.layers.SeparableConv1D(
            self.all_head_size,
            self.conv_kernel_size,
            padding="same",
            activation=None,
            depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
            pointwise_initializer=get_initializer(config.initializer_range),
            name="key_conv_attn_layer",
        )
        self.conv_kernel_layer = keras.layers.Dense(
            self.num_attention_heads * self.conv_kernel_size,
            activation=None,
            name="conv_kernel_layer",
            kernel_initializer=get_initializer(config.initializer_range),
        )
        self.conv_out_layer = keras.layers.Dense(
            self.all_head_size,
            activation=None,
            name="conv_out_layer",
            kernel_initializer=get_initializer(config.initializer_range),
        )
        self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.config = config

    def transpose_for_scores(self, x, batch_size):
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
        x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
        batch_size =
shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer) conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer) conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1]) conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1) paddings = tf.constant( [ [ 0, 0, ], [int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)], [0, 0], ] ) conv_out_layer = self.conv_out_layer(hidden_states) conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size]) conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT") unfold_conv_out_layer = tf.stack( [ tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size]) for i in range(self.conv_kernel_size) ], axis=-1, ) conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size]) conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer) conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size]) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFBertModel call() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
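        # (for a given query position, zeroing one attention weight removes that key's entire
        # value vector from the weighted sum computed below)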
attention_probs = self.dropout(attention_probs, training=training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask value_layer = tf.reshape( mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size] ) value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]) context_layer = tf.concat([context_layer, conv_out], 2) context_layer = tf.reshape( context_layer, (batch_size, -1, self.head_ratio * self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "query", None) is not None: with tf.name_scope(self.query.name): self.query.build([None, None, self.config.hidden_size]) if getattr(self, "key", None) is not None: with tf.name_scope(self.key.name): self.key.build([None, None, self.config.hidden_size]) if getattr(self, "value", None) is not None: with tf.name_scope(self.value.name): self.value.build([None, None, self.config.hidden_size]) if getattr(self, "key_conv_attn_layer", None) is not None: with tf.name_scope(self.key_conv_attn_layer.name): self.key_conv_attn_layer.build([None, None, self.config.hidden_size]) if getattr(self, "conv_kernel_layer", None) is not None: with tf.name_scope(self.conv_kernel_layer.name): self.conv_kernel_layer.build([None, None, self.all_head_size]) if getattr(self, "conv_out_layer", None) is not None: with tf.name_scope(self.conv_out_layer.name): self.conv_out_layer.build([None, None, self.config.hidden_size]) class TFConvBertSelfOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) class TFConvBertAttention(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self_attention = TFConvBertSelfAttention(config, name="self") self.dense_output = TFConvBertSelfOutput(config, name="output") def prune_heads(self, heads): raise NotImplementedError def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False): self_outputs = self.self_attention( input_tensor, attention_mask, head_mask, output_attentions, training=training ) attention_output = self.dense_output(self_outputs[0], input_tensor, training=training) outputs = (attention_output,) + self_outputs[1:] # add attentions 
if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "self_attention", None) is not None: with tf.name_scope(self.self_attention.name): self.self_attention.build(None) if getattr(self, "dense_output", None) is not None: with tf.name_scope(self.dense_output.name): self.dense_output.build(None) class GroupedLinearLayer(keras.layers.Layer): def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs): super().__init__(**kwargs) self.input_size = input_size self.output_size = output_size self.num_groups = num_groups self.kernel_initializer = kernel_initializer self.group_in_dim = self.input_size // self.num_groups self.group_out_dim = self.output_size // self.num_groups def build(self, input_shape=None): self.kernel = self.add_weight( "kernel", shape=[self.group_out_dim, self.group_in_dim, self.num_groups], initializer=self.kernel_initializer, trainable=True, ) self.bias = self.add_weight( "bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True ) super().build(input_shape) def call(self, hidden_states): batch_size = shape_list(hidden_states)[0] x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2]) x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0])) x = tf.transpose(x, [1, 0, 2]) x = tf.reshape(x, [batch_size, -1, self.output_size]) x = tf.nn.bias_add(value=x, bias=self.bias) return x class TFConvBertIntermediate(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.num_groups == 1: self.dense = keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) else: self.dense = GroupedLinearLayer( config.hidden_size, config.intermediate_size, num_groups=config.num_groups, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) class TFConvBertOutput(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.num_groups == 1: self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) else: self.dense = GroupedLinearLayer( config.intermediate_size, config.hidden_size, num_groups=config.num_groups, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) self.config = config def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with 
tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.intermediate_size]) class TFConvBertLayer(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFConvBertAttention(config, name="attention") self.intermediate = TFConvBertIntermediate(config, name="intermediate") self.bert_output = TFConvBertOutput(config, name="output") def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions, training=training ) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.bert_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "attention", None) is not None: with tf.name_scope(self.attention.name): self.attention.build(None) if getattr(self, "intermediate", None) is not None: with tf.name_scope(self.intermediate.name): self.intermediate.build(None) if getattr(self, "bert_output", None) is not None: with tf.name_scope(self.bert_output.name): self.bert_output.build(None) class TFConvBertEncoder(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] def call( self, hidden_states, attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, training=training ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "layer", None) is not None: for layer in self.layer: with tf.name_scope(layer.name): layer.build(None) class TFConvBertPredictionHeadTransform(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.config = config def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, 
input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.hidden_size]) @keras_serializable class TFConvBertMainLayer(keras.layers.Layer): config_class = ConvBertConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.embeddings = TFConvBertEmbeddings(config, name="embeddings") if config.embedding_size != config.hidden_size: self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project") self.encoder = TFConvBertEncoder(config, name="encoder") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = value.shape[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ raise NotImplementedError def get_extended_attention_mask(self, attention_mask, input_shape, dtype): if attention_mask is None: attention_mask = tf.fill(input_shape, 1) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
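        # For example, with attention_mask = [[1, 1, 0]] the reshaped mask broadcasts to
        # [batch_size, 1, 1, seq_len] and the two lines below turn it into [[0.0, 0.0, -10000.0]],
        # so the padded position gets a large negative score and near-zero probability after the softmax.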
extended_attention_mask = tf.cast(extended_attention_mask, dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def get_head_mask(self, head_mask): if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.config.num_hidden_layers return head_mask @unpack_inputs def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype) head_mask = self.get_head_mask(head_mask) if hasattr(self, "embeddings_project"): hidden_states = self.embeddings_project(hidden_states, training=training) hidden_states = self.encoder( hidden_states, extended_attention_mask, head_mask, output_attentions, output_hidden_states, return_dict, training=training, ) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "embeddings", None) is not None: with tf.name_scope(self.embeddings.name): self.embeddings.build(None) if getattr(self, "encoder", None) is not None: with tf.name_scope(self.encoder.name): self.encoder.build(None) if getattr(self, "embeddings_project", None) is not None: with tf.name_scope(self.embeddings_project.name): self.embeddings_project.build([None, None, self.config.embedding_size]) class TFConvBertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ConvBertConfig base_model_prefix = "convbert" CONVBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! 
If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Args: config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CONVBERT_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.", CONVBERT_START_DOCSTRING, ) class TFConvBertModel(TFConvBertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.convbert = TFConvBertMainLayer(config, name="convbert") @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: Optional[Union[np.array, tf.Tensor]] = None, token_type_ids: Optional[Union[np.array, tf.Tensor]] = None, position_ids: Optional[Union[np.array, tf.Tensor]] = None, head_mask: Optional[Union[np.array, tf.Tensor]] = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: outputs = self.convbert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) class TFConvBertMaskedLMHead(keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.config = config self.embedding_size = config.embedding_size self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): seq_length = shape_list(tensor=hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states class TFConvBertGeneratorPredictions(keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.LayerNorm = 
keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dense = keras.layers.Dense(config.embedding_size, name="dense") self.config = config def call(self, generator_hidden_states, training=False): hidden_states = self.dense(generator_hidden_states) hidden_states = get_tf_activation("gelu")(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "LayerNorm", None) is not None: with tf.name_scope(self.LayerNorm.name): self.LayerNorm.build([None, None, self.config.embedding_size]) if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) @add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING) class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, **kwargs) self.config = config self.convbert = TFConvBertMainLayer(config, name="convbert") self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions") if isinstance(config.hidden_act, str): self.activation = get_tf_activation(config.hidden_act) else: self.activation = config.hidden_act self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head") def get_lm_head(self): return self.generator_lm_head def get_prefix_bias_name(self): return self.name + "/" + self.generator_lm_head.name @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFMaskedLMOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ generator_hidden_states = self.convbert( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output, training=training) prediction_scores = self.generator_lm_head(prediction_scores, training=training) loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TFMaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "generator_predictions", None) is not None: with tf.name_scope(self.generator_predictions.name): self.generator_predictions.build(None) if getattr(self, "generator_lm_head", None) is not None: with tf.name_scope(self.generator_lm_head.name): self.generator_lm_head.build(None) class TFConvBertClassificationHead(keras.layers.Layer): """Head for sentence-level classification tasks.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" ) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.out_proj = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" ) self.config = config def call(self, hidden_states, **kwargs): x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = get_tf_activation(self.config.hidden_act)(x) x = self.dropout(x) x = self.out_proj(x) return x def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "dense", None) is not None: with tf.name_scope(self.dense.name): self.dense.build([None, None, self.config.hidden_size]) if getattr(self, "out_proj", None) is not None: with tf.name_scope(self.out_proj.name): self.out_proj.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. 
""", CONVBERT_START_DOCSTRING, ) class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convbert = TFConvBertMainLayer(config, name="convbert") self.classifier = TFConvBertClassificationHead(config, name="classifier") @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFSequenceClassifierOutput]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ outputs = self.convbert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) logits = self.classifier(outputs[0], training=training) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build(None) @add_start_docstrings( """ ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", CONVBERT_START_DOCSTRING, ) class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.convbert = TFConvBertMainLayer(config, name="convbert") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.classifier = keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward( CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFMultipleChoiceModelOutput]: r""" labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ if input_ids is not None: num_choices = shape_list(input_ids)[1] seq_length = shape_list(input_ids)[2] else: num_choices = shape_list(inputs_embeds)[1] seq_length = shape_list(inputs_embeds)[2] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None flat_inputs_embeds = ( tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) if inputs_embeds is not None else None ) outputs = self.convbert( flat_input_ids, flat_attention_mask, flat_token_type_ids, flat_position_ids, head_mask, flat_inputs_embeds, output_attentions, output_hidden_states, return_dict=return_dict, training=training, ) logits = self.sequence_summary(outputs[0], training=training) logits = self.classifier(logits) reshaped_logits = tf.reshape(logits, (-1, num_choices)) loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) if not return_dict: output = (reshaped_logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "sequence_summary", None) is not None: with tf.name_scope(self.sequence_summary.name): self.sequence_summary.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, 
self.config.hidden_size]) @add_start_docstrings( """ ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, CONVBERT_START_DOCSTRING, ) class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convbert = TFConvBertMainLayer(config, name="convbert") classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = keras.layers.Dropout(classifier_dropout) self.classifier = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFTokenClassifierOutput]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ outputs = self.convbert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output, training=training) logits = self.classifier(sequence_output) loss = None if labels is None else self.hf_compute_loss(labels, logits) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "classifier", None) is not None: with tf.name_scope(self.classifier.name): self.classifier.build([None, None, self.config.hidden_size]) @add_start_docstrings( """ ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", CONVBERT_START_DOCSTRING, ) class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.convbert = TFConvBertMainLayer(config, name="convbert") self.qa_outputs = keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) self.config = config @unpack_inputs @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: TFModelInputType | None = None, attention_mask: np.ndarray | tf.Tensor | None = None, token_type_ids: np.ndarray | tf.Tensor | None = None, position_ids: np.ndarray | tf.Tensor | None = None, head_mask: np.ndarray | tf.Tensor | None = None, inputs_embeds: tf.Tensor | None = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, start_positions: tf.Tensor | None = None, end_positions: tf.Tensor | None = None, training: Optional[bool] = False, ) -> Union[Tuple, TFQuestionAnsweringModelOutput]: r""" start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ outputs = self.convbert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if start_positions is not None and end_positions is not None: labels = {"start_position": start_positions} labels["end_position"] = end_positions loss = self.hf_compute_loss(labels, (start_logits, end_logits)) if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((loss,) + output) if loss is not None else output return TFQuestionAnsweringModelOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def build(self, input_shape=None): if self.built: return self.built = True if getattr(self, "convbert", None) is not None: with tf.name_scope(self.convbert.name): self.convbert.build(None) if getattr(self, "qa_outputs", None) is not None: with tf.name_scope(self.qa_outputs.name): self.qa_outputs.build([None, None, self.config.hidden_size])
transformers/src/transformers/models/convbert/modeling_tf_convbert.py/0
{ "file_path": "transformers/src/transformers/models/convbert/modeling_tf_convbert.py", "repo_id": "transformers", "token_count": 26514 }
353
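For reference, a minimal usage sketch for the TF ConvBERT task heads defined above. This is not part of the source file: the checkpoint name is the one the model's doc samples reference, and `num_labels=2` is an illustrative choice (the classification head is randomly initialized unless the checkpoint was fine-tuned for the task).

```python
import tensorflow as tf

from transformers import AutoTokenizer, TFConvBertForSequenceClassification

# Illustrative checkpoint and label count.
tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base", num_labels=2)

inputs = tokenizer("ConvBERT mixes self-attention with span-based dynamic convolution.", return_tensors="tf")
outputs = model(**inputs)  # TFSequenceClassifierOutput; `logits` has shape (batch_size, num_labels)
predicted_class = int(tf.math.argmax(outputs.logits, axis=-1)[0])
```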
# coding=utf-8
# Copyright 2022 The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Decision Transformer model configuration"""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class DecisionTransformerConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a [`DecisionTransformerModel`]. It is used to
    instantiate a Decision Transformer model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the standard
    DecisionTransformer architecture. Many of the config options are used to instantiate the GPT2 model that is used
    as part of the architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        state_dim (`int`, *optional*, defaults to 17):
            The state size for the RL environment.
        act_dim (`int`, *optional*, defaults to 4):
            The size of the output action space.
        hidden_size (`int`, *optional*, defaults to 128):
            The size of the hidden layers.
        max_ep_len (`int`, *optional*, defaults to 4096):
            The maximum length of an episode in the environment.
        action_tanh (`bool`, *optional*, defaults to `True`):
            Whether to use a tanh activation on the action prediction.
        vocab_size (`int`, *optional*, defaults to 1):
            Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DecisionTransformerModel`].
        n_positions (`int`, *optional*, defaults to 1024):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        n_layer (`int`, *optional*, defaults to 3):
            Number of hidden layers in the Transformer encoder.
        n_head (`int`, *optional*, defaults to 1):
            Number of attention heads for each attention layer in the Transformer encoder.
        n_inner (`int`, *optional*):
            Dimensionality of the inner feed-forward layers. If unset, will default to 4 times `n_embd`.
        activation_function (`str`, *optional*, defaults to `"relu"`):
            Activation function, to be selected in the list `["relu", "silu", "gelu", "tanh", "gelu_new"]`.
        resid_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        embd_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the embeddings.
        attn_pdrop (`float`, *optional*, defaults to 0.1):
            The dropout ratio for the attention.
        layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        scale_attn_weights (`bool`, *optional*, defaults to `True`):
            Scale attention weights by dividing by sqrt(hidden_size).
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
        scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
            Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
        reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
            Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
            dot-product/softmax to float() when training with mixed precision.

    Example:

    ```python
    >>> from transformers import DecisionTransformerConfig, DecisionTransformerModel

    >>> # Initializing a DecisionTransformer configuration
    >>> configuration = DecisionTransformerConfig()

    >>> # Initializing a model (with random weights) from the configuration
    >>> model = DecisionTransformerModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
transformers/src/transformers/models/decision_transformer/configuration_decision_transformer.py/0
{ "file_path": "transformers/src/transformers/models/decision_transformer/configuration_decision_transformer.py", "repo_id": "transformers", "token_count": 2663 }
354
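A sketch of how the config above parameterizes the model. The `state_dim`/`act_dim` values are hypothetical (for an imaginary environment with 11-dim observations and 3-dim actions); the forward arguments follow `DecisionTransformerModel`'s signature.

```python
import torch

from transformers import DecisionTransformerConfig, DecisionTransformerModel

# Hypothetical environment: 11-dim observations, 3-dim continuous actions.
config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=128, n_layer=3, n_head=1)
model = DecisionTransformerModel(config)

batch_size, seq_len = 1, 20
outputs = model(
    states=torch.randn(batch_size, seq_len, config.state_dim),
    actions=torch.randn(batch_size, seq_len, config.act_dim),
    rewards=torch.randn(batch_size, seq_len, 1),
    returns_to_go=torch.randn(batch_size, seq_len, 1),
    timesteps=torch.arange(seq_len).unsqueeze(0),
    attention_mask=torch.ones(batch_size, seq_len),
)
# The model predicts a state, action and return for each timestep.
action_preds = outputs.action_preds  # shape: (batch_size, seq_len, act_dim)
```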
# coding=utf-8
# Copyright 2023 Xuan Ouyang, Shuohuan Wang, Chao Pang, Yu Sun, Hao Tian, Hua Wu, Haifeng Wang and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ErnieM model configuration"""
# Adapted from the original paddlenlp repository
# (https://github.com/PaddlePaddle/PaddleNLP/blob/develop/paddlenlp/transformers/ernie_m/configuration.py)

from __future__ import annotations

from typing import Dict

from ....configuration_utils import PretrainedConfig


class ErnieMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of an [`ErnieMModel`]. It is used to instantiate an
    Ernie-M model according to the specified arguments, defining the model architecture. Instantiating a configuration
    with the defaults will yield a similar configuration to that of the `Ernie-M`
    [susnato/ernie-m-base_pytorch](https://huggingface.co/susnato/ernie-m-base_pytorch) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 250002):
            Vocabulary size of `inputs_ids` in [`ErnieMModel`]. It is also the vocab size of the token embedding
            matrix, and defines the number of different tokens that can be represented by the `inputs_ids` passed when
            calling [`ErnieMModel`].
        hidden_size (`int`, *optional*, defaults to 768):
            Dimensionality of the embedding layer, encoder layers and pooler layer.
        num_hidden_layers (`int`, *optional*, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (`int`, *optional*, defaults to 3072):
            Dimensionality of the feed-forward (ff) layer in the encoder. Input tensors to feed-forward layers are
            firstly projected from hidden_size to intermediate_size, and then projected back to hidden_size. Typically
            intermediate_size is larger than hidden_size.
        hidden_act (`str`, *optional*, defaults to `"gelu"`):
            The non-linear activation function in the feed-forward layer. `"gelu"`, `"relu"` and any other torch
            supported activation functions are supported.
        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings and encoder.
        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
            The dropout probability used in `MultiHeadAttention` in all encoder layers to drop some attention target.
        max_position_embeddings (`int`, *optional*, defaults to 514):
            The maximum value of the dimensionality of position encoding, which dictates the maximum supported length
            of an input sequence.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the normal initializer for initializing all weight matrices.
        pad_token_id (`int`, *optional*, defaults to 1):
            Padding token id, i.e. the index of the padding token in the token vocabulary.
layer_norm_eps (`float`, *optional*, defaults to 1e-05): The epsilon used by the layer normalization layers. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. act_dropout (`float`, *optional*, defaults to 0.0): This dropout probability is used in `ErnieMEncoderLayer` after activation. A normal_initializer initializes weight matrices as normal distributions. See `ErnieMPretrainedModel._init_weights()` for how weights are initialized in `ErnieMModel`. """ model_type = "ernie_m" attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self, vocab_size: int = 250002, hidden_size: int = 768, num_hidden_layers: int = 12, num_attention_heads: int = 12, intermediate_size: int = 3072, hidden_act: str = "gelu", hidden_dropout_prob: float = 0.1, attention_probs_dropout_prob: float = 0.1, max_position_embeddings: int = 514, initializer_range: float = 0.02, pad_token_id: int = 1, layer_norm_eps: float = 1e-05, classifier_dropout=None, act_dropout=0.0, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.classifier_dropout = classifier_dropout self.act_dropout = act_dropout
transformers/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/ernie_m/configuration_ernie_m.py", "repo_id": "transformers", "token_count": 2128 }
355
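A short sketch of instantiating the configuration above with non-default sizes. The values are illustrative, and `ErnieMModel` is assumed to still be importable from the top-level package (the implementation lives under the deprecated models in recent versions).

```python
from transformers import ErnieMConfig, ErnieMModel

# Smaller-than-default dimensions for a quick, randomly initialized model.
config = ErnieMConfig(
    vocab_size=250002,
    hidden_size=384,
    num_hidden_layers=6,
    num_attention_heads=6,
    intermediate_size=1536,
)
model = ErnieMModel(config)
print(config.dropout is config.classifier_dropout)  # True: `dropout` is aliased via `attribute_map`
```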
# coding=utf-8
# Copyright 2022 The OpenAI Team Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Jukebox model."""

import math
import os
from typing import List, Optional, Tuple

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import LayerNorm as FusedLayerNorm

from ....activations import ACT2FN
from ....modeling_utils import PreTrainedModel
from ....utils import add_start_docstrings, logging
from ....utils.logging import tqdm
from .configuration_jukebox import ATTENTION_PATTERNS, JukeboxConfig, JukeboxPriorConfig, JukeboxVQVAEConfig


logger = logging.get_logger(__name__)


def filter_logits(logits, top_k=0, top_p=0.0, filter_value=-float("Inf")):
    """
    Filter a distribution of logits using top-k and/or nucleus (top-p) filtering.

    Args:
        logits (`torch.Tensor`):
            logits distribution shape (vocabulary size)
        top_k (`int`, *optional*, defaults to 0):
            When `top_k > 0`, keep only the `top_k` tokens with the highest probability (top-k filtering).
        top_p (`float`, *optional*, defaults to 0.0):
            When `top_p > 0.0`, keep the top tokens with cumulative probability >= `top_p` (nucleus filtering).
    """
    logits = logits.clone()
    top_k = min(top_k, logits.size(-1))  # Safety check

    if top_k > 0:
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k, dim=-1)[0][..., -1:]
        logits[indices_to_remove] = filter_value

    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True, dim=-1)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold
        sorted_indices_to_remove = cumulative_probs > top_p
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        # indices_to_remove = sorted_indices[sorted_indices_to_remove]
        indices_to_remove = torch.zeros_like(logits, dtype=torch.bool).scatter_(
            dim=-1, index=sorted_indices, src=sorted_indices_to_remove
        )
        logits[indices_to_remove] = filter_value
    return logits


def get_relevant_lyric_tokens(full_tokens, max_n_lyric_tokens, total_length, offset, duration):
    """
    Extract only the relevant tokens based on the character position. A total of `max_n_lyric_tokens` tokens will be
    returned. If the provided token sequence is smaller, it will be padded; otherwise, only characters ranging from
    the midpoint - `max_n_lyric_tokens//2` to the midpoint + `max_n_lyric_tokens//2` will be returned. This *focuses*
    on the most relevant tokens (in time) for the sequence.

    Args:
        full_tokens (`List[int]`):
            List containing the token ids of the entire lyrics.
        max_n_lyric_tokens (`int`):
            Maximum number of lyric tokens to return.
        total_length (`int`):
            Total expected length of the music (not all of it is generated, see duration), in samples.
        offset (`int`):
            Starting sample in the music.
            If the offset is greater than 0, the lyrics will be shifted to take that into account.
        duration (`int`):
            Expected duration of the generated music, in samples. The duration has to be smaller than the total
            length, which represents the overall length of the signal.
    """
    full_tokens = full_tokens[0]
    if len(full_tokens) < max_n_lyric_tokens:
        tokens = torch.cat(
            [torch.zeros(max_n_lyric_tokens - len(full_tokens), dtype=torch.long).to(full_tokens.device), full_tokens]
        )
        indices = [-1] * (max_n_lyric_tokens - len(full_tokens)) + list(range(0, len(full_tokens)))
    else:
        midpoint = int(len(full_tokens) * (offset + duration / 2.0) / total_length)
        midpoint = min(max(midpoint, max_n_lyric_tokens // 2), len(full_tokens) - max_n_lyric_tokens // 2)
        tokens = full_tokens[midpoint - max_n_lyric_tokens // 2 : midpoint + max_n_lyric_tokens // 2]
        indices = list(range(midpoint - max_n_lyric_tokens // 2, midpoint + max_n_lyric_tokens // 2))
    return tokens.unsqueeze(dim=0), indices


# Break total_length into hops/windows of size n_ctx separated by hop_length
def get_starts(total_length, n_ctx, hop_length):
    starts = []
    for start in range(0, total_length - n_ctx + hop_length, hop_length):
        if start + n_ctx >= total_length:
            # Last hop could be smaller, we make it n_ctx to maximise context
            start = total_length - n_ctx
        starts.append(start)
    return starts


def get_alignment(music_tokens, labels, prior, config):
    level = prior.levels - 1  # Top level used
    n_ctx = prior.n_ctx
    tokens = music_tokens[level]
    batch_size, total_length = tokens.shape[0], tokens.shape[1]
    if total_length < n_ctx:
        padding_length = n_ctx - total_length
        tokens = torch.cat(
            [tokens, torch.zeros(batch_size, n_ctx - total_length, dtype=tokens.dtype, device=tokens.device)], dim=1
        )
        total_length = tokens.shape[1]
    else:
        padding_length = 0

    hop_length = int(config.hop_fraction[-level - 1] * prior.n_ctx)
    alignment_head, alignment_layer = config.prior_alignment_head[0], config.prior_alignment_layer[0]
    attn_layers = {alignment_layer}
    alignment_hops = {}
    indices_hops = {}
    for start in tqdm(get_starts(total_length, n_ctx, hop_length), desc="Computing lyric to music alignment "):
        end = start + n_ctx
        # set metadata offset, sample_length and lyrics tokens
        metadata, indices_hop = prior.get_metadata(labels, start, config.sample_length, get_indices=True, offset=0)
        tokens_bs = torch.chunk(tokens, batch_size, dim=0)
        metadata_bs = torch.chunk(metadata, batch_size, dim=0)
        w_hops = []
        for tokens_i, metadata_i in zip(tokens_bs, metadata_bs):
            w_hop = prior.forward_tokens(tokens_i[:, start:end], [], metadata_i, get_attn_weights=attn_layers)
            w_hops.append(w_hop[0][:, alignment_head])
            del w_hop
        weights = torch.cat(w_hops, dim=0)
        del w_hops
        alignment_hop = weights.float().cpu().numpy()
        del weights

        # alignment_hop has shape (bs, n_ctx, nb_relevant_lyric_tokens)
        # indices_hop is a list of len=bs, each entry of len hps.nb_relevant_lyric_tokens
        indices_hops[start] = indices_hop
        alignment_hops[start] = alignment_hop

    # Combine attn for each hop into attn for full range
    # Use indices to place them into correct place for corresponding source tokens
    alignments = []
    for item in range(batch_size):
        # Note each item has different length lyrics
        full_tokens = labels[0, 3:]
        alignment = np.zeros((total_length, len(full_tokens) + 1))
        for start in reversed(get_starts(total_length, n_ctx, hop_length)):
            end = start + n_ctx
            alignment_hop = alignment_hops[start][item]
            indices = indices_hops[start][item]
            alignment[start:end, indices] = alignment_hop
        alignment = alignment[: total_length - padding_length, :-1]  # remove token padding, and last lyric index
        alignments.append(alignment)
    return alignments


def save_temp_audio(fname, lvl, metas, aud):
    aud = torch.clamp(aud, -1, 1).cpu().numpy()
    for i in list(range(aud.shape[0])):
        if metas is not None:
            artists, genres, lyrics = list(metas)[i].values()
            path = f"{fname}/lvl_{lvl}-{artists}-{genres}-{lyrics[:5]}-{i}"
            np.save(path, aud[i])
        else:
            np.save(f"{fname}/lvl_{lvl}-sample-{i}", aud[i])


def get_mask(mask, query_length, key_value_length, blocks, spread, device, sample, sample_t):
    # returns a mask of shape 1 x 1 x query_length x key_value_length or None if masking is not needed.
    if mask is None or query_length == 1:
        return None

    offset = sample_t - query_length if sample else max(key_value_length - query_length, 0)
    if mask == "autoregressive":
        # Masked dense
        mask = torch.ones(query_length, key_value_length, device=device).tril(offset)
    elif mask == "summary":
        # Masked summary
        mask = torch.ones(query_length, query_length, device=device).tril()
        mask = mask.view(query_length, blocks, query_length // blocks)[:, :-1, -key_value_length // blocks :]
        mask = (
            torch.nn.functional.pad(
                mask,
                (0, 0, 1, 0),
                value=1,
            )
            .contiguous()
            .view(query_length, key_value_length)
        )
    elif mask == "prime":
        mask = torch.ones(query_length, key_value_length, device=device).tril(offset)
    return mask.view(1, 1, query_length, key_value_length)


class JukeboxConv1D(nn.Module):
    def __init__(self, input_width, output_width):
        super().__init__()
        self.input_width = input_width
        self.output_width = output_width
        weight = torch.empty(input_width, output_width)
        bias = torch.zeros(output_width)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)

    def forward(self, hidden_states):
        size_out = (*hidden_states.size()[:-1], self.output_width)
        hidden_states = torch.addmm(
            self.bias.type_as(hidden_states),
            hidden_states.view(-1, hidden_states.size(-1)),
            self.weight.type_as(hidden_states),
        )
        hidden_states = hidden_states.view(*size_out)
        return hidden_states


class JukeboxResConv1DBlock(nn.Module):
    def __init__(self, config, conv_width, depth=1, res_scale=1.0):
        super().__init__()
        hidden_dim = config.res_convolution_multiplier * conv_width
        dilation = config.res_dilation_growth_rate**depth
        padding = dilation

        self.res_scale = res_scale
        self.activation = nn.ReLU()
        self.conv1d_1 = nn.Conv1d(conv_width, hidden_dim, 3, 1, padding, dilation)
        self.conv1d_2 = nn.Conv1d(hidden_dim, conv_width, 1, 1, 0)

    def forward(self, hidden_states):
        residuals = hidden_states
        hidden_states = self.activation(hidden_states)
        hidden_states = self.conv1d_1(hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.conv1d_2(hidden_states)
        return residuals + self.res_scale * hidden_states


class JukeboxResnet1D(nn.Module):
    def __init__(self, config, conv_width, n_depth, reverse_dilation=False):
        super().__init__()
        self.dilation_cycle = config.res_dilation_cycle
        res_scale = 1.0 if not config.conv_res_scale else 1.0 / math.sqrt(n_depth)

        blocks = []
        for depth in range(n_depth):
            block_depth = depth if self.dilation_cycle is None else depth % self.dilation_cycle
            blocks.append(JukeboxResConv1DBlock(config, conv_width, block_depth, res_scale))

        if reverse_dilation:
            blocks = blocks[::-1]
        self.resnet_block = nn.ModuleList(blocks)

    def forward(self, hidden_states):
        for block in self.resnet_block:
            hidden_states = block(hidden_states)
        return hidden_states


class JukeboxEncoderConvBlock(nn.Module):
    def __init__(self, config, embed_dim, hidden_dim, depth, down_t,
stride_t): super().__init__() blocks = [] filter_t = stride_t * 2 pad_t = stride_t // 2 if down_t > 0: for i in range(down_t): blocks.append(nn.Conv1d(embed_dim if i == 0 else hidden_dim, hidden_dim, filter_t, stride_t, pad_t)) blocks.append(JukeboxResnet1D(config, hidden_dim, depth)) self.proj_out = nn.Conv1d(hidden_dim, config.embed_dim, 3, 1, 1) self.downsample_block = nn.ModuleList(blocks) def forward(self, hidden_states): for block in self.downsample_block: hidden_states = block(hidden_states) hidden_states = self.proj_out(hidden_states) return hidden_states class JukeboxEncoder(nn.Module): def __init__(self, config, width, depth, levels, downs_t, strides_t): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() iterator = zip(list(range(self.levels)), downs_t, strides_t) for i, down_t, stride_t in iterator: self.level_blocks.append( JukeboxEncoderConvBlock( config, config.conv_input_shape if i == 0 else config.embed_dim, width, depth, down_t, stride_t ) ) def forward(self, hidden_states): all_hidden_states = [] # 64, 32, ... for level in range(self.levels): level_block = self.level_blocks[level] hidden_states = level_block(hidden_states) all_hidden_states.append(hidden_states) return all_hidden_states class JukeboxDecoderConvBock(nn.Module): def __init__(self, config, embed_dim, hidden_dim, depth, down_t, stride_t, reverse_dilation=True): self.embed_dim = embed_dim self.hidden_dim = hidden_dim super().__init__() blocks = [] if down_t > 0: filter_t = stride_t * 2 pad_t = stride_t // 2 self.proj_in = nn.Conv1d(embed_dim, hidden_dim, 3, 1, 1) for i in range(down_t): blocks.append(JukeboxResnet1D(config, hidden_dim, depth, reverse_dilation)) blocks.append( nn.ConvTranspose1d( hidden_dim, hidden_dim if i < down_t - 1 else embed_dim, filter_t, stride_t, pad_t ) ) self.upsample_block = nn.ModuleList(blocks) def forward(self, hidden_states): hidden_states = self.proj_in(hidden_states) for block in self.upsample_block: hidden_states = block(hidden_states) return hidden_states class JukeboxDecoder(nn.Module): def __init__(self, config, hidden_dim, depth, levels, downs_t, strides_t): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() for level, down_t, stride_t in zip(list(range(self.levels)), downs_t, strides_t): self.level_blocks.append( JukeboxDecoderConvBock(config, config.embed_dim, hidden_dim, depth, down_t, stride_t) ) self.out = nn.Conv1d(config.embed_dim, config.conv_input_shape, 3, 1, 1) def forward(self, hidden_states, all_levels=True): hidden_state = hidden_states[-1] # 32, 64 ... 
for level in reversed(range(self.levels)): level_block = self.level_blocks[level] hidden_state = level_block(hidden_state) if level != 0 and all_levels: hidden_state = hidden_state + hidden_states[level - 1] hidden_state = self.out(hidden_state) return hidden_state class JukeboxBottleneckBlock(nn.Module): def __init__(self, config: JukeboxVQVAEConfig): super().__init__() self.nb_discrete_codes = config.nb_discrete_codes self.codebook_width = config.embed_dim self.mu = config.lmu self.threshold = 1.0 self.init = False self.codebook_sum = None self.codebook_elem = None self.register_buffer("codebook", torch.zeros(self.nb_discrete_codes, self.codebook_width)) def _tile(self, hidden_states): dim, embed_width = hidden_states.shape if dim < self.nb_discrete_codes: n_repeats = (self.nb_discrete_codes + dim - 1) // dim std = 0.01 / np.sqrt(embed_width) hidden_states = hidden_states.repeat(n_repeats, 1) hidden_states = hidden_states + torch.randn_like(hidden_states) * std return hidden_states def init_codebook(self, hidden_states): nb_discrete_codes = self.nb_discrete_codes self.init = True codes = self._tile(hidden_states) self.codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] self.codebook_sum = self.codebook self.codebook_elem = torch.ones(nb_discrete_codes, device=self.codebook.device) def update_codebook(self, hidden_states, latent_states): mu, codebook_width, nb_discrete_codes = self.mu, self.codebook_width, self.nb_discrete_codes with torch.no_grad(): # Calculate new centres # nb_discrete_codes, batch_size * seq_length latent_states_onehot = torch.zeros(nb_discrete_codes, hidden_states.shape[0], device=hidden_states.device) latent_states_onehot.scatter_(0, latent_states.view(1, hidden_states.shape[0]), 1) _codebook_sum = torch.matmul(latent_states_onehot, hidden_states) _codebook_elem = latent_states_onehot.sum(dim=-1) # nb_discrete_codes codes = self._tile(hidden_states) _random_codebook = codes[torch.randperm(codes.shape[0])][:nb_discrete_codes] # Update centres old_codebook = self.codebook self.codebook_sum = mu * self.codebook_sum + (1.0 - mu) * _codebook_sum self.codebook_elem = mu * self.codebook_elem + (1.0 - mu) * _codebook_elem # nb_discrete_codes usage = (self.codebook_elem.view(nb_discrete_codes, 1) >= self.threshold).float() norm_code = self.codebook_sum.view(nb_discrete_codes, codebook_width) / self.codebook_elem.view( nb_discrete_codes, 1 ) self.codebook = usage * (norm_code) + (1 - usage) * _random_codebook _codebook_prob = _codebook_elem / torch.sum(_codebook_elem) # prob of each bin entropy = -torch.sum(_codebook_prob * torch.log(_codebook_prob + 1e-8)) # entropy ie how diverse used_curr = (_codebook_elem >= self.threshold).sum() usage = torch.sum(usage) dk = torch.norm(self.codebook - old_codebook) / np.sqrt(np.prod(old_codebook.shape)) return {"entropy": entropy, "used_curr": used_curr, "usage": usage, "dk": dk} def preprocess(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1).contiguous() hidden_states = hidden_states.view(-1, hidden_states.shape[-1]) if hidden_states.shape[-1] == self.codebook_width: prenorm = torch.norm(hidden_states - torch.mean(hidden_states)) / np.sqrt(np.prod(hidden_states.shape)) elif hidden_states.shape[-1] == 2 * self.codebook_width: x1, x2 = hidden_states[..., : self.codebook_width], hidden_states[..., self.codebook_width :] prenorm = (torch.norm(x1 - torch.mean(x1)) / np.sqrt(np.prod(x1.shape))) + ( torch.norm(x2 - torch.mean(x2)) / np.sqrt(np.prod(x2.shape)) ) # Normalise hidden_states = x1 + x2 return 
hidden_states, prenorm def postprocess(self, latent_states, dequantised_states, x_shape): batch_size, time = x_shape dequantised_states = dequantised_states.view(batch_size, time, -1).permute(0, 2, 1).contiguous() latent_states = latent_states.view(batch_size, time) return latent_states, dequantised_states def quantise(self, latent_states): # Calculate latent code latent_states codebook_weights = self.codebook.t() distance = ( torch.sum(latent_states**2, dim=-1, keepdim=True) - 2 * torch.matmul(latent_states, codebook_weights) + torch.sum(codebook_weights**2, dim=0, keepdim=True) ) # (batch_size * latent_states , codebook_weights) min_distance, music_tokens = torch.min(distance, dim=-1) fit = torch.mean(min_distance) return music_tokens, fit def dequantise(self, music_tokens): dequantised_states = F.embedding(music_tokens, self.codebook) return dequantised_states def encode(self, latent_states): samples, _, seq_len = latent_states.shape # Preprocess. latent_states, _ = self.preprocess(latent_states) # Quantise music_tokens, _ = self.quantise(latent_states) # Postprocess. music_tokens = music_tokens.view(samples, seq_len) return music_tokens def decode(self, music_tokens): samples, seq_len = music_tokens.shape # Dequantise dequantised_states = self.dequantise(music_tokens) # Postprocess dequantised_states = ( dequantised_states.view(samples, seq_len, self.codebook_width).permute(0, 2, 1).contiguous() ) return dequantised_states def forward(self, hidden_states, update_codebook=True): samples, _, seq_len = hidden_states.shape # Preprocess hidden_states, prenorm = self.preprocess(hidden_states) # Init codebook if not inited if update_codebook and not self.init: self.init_codebook(hidden_states) # Quantise and dequantise through bottleneck music_tokens, fit = self.quantise(hidden_states) dequantised_states = self.dequantise(music_tokens) # Update embeddings if update_codebook: update_metrics = self.update_codebook(hidden_states, music_tokens) else: update_metrics = {} # Loss commit_loss = torch.norm(dequantised_states.detach() - hidden_states) ** 2 / np.prod(hidden_states.shape) # Passthrough dequantised_states = hidden_states + (dequantised_states - hidden_states).detach() # Postprocess music_tokens, dequantised_states = self.postprocess(music_tokens, dequantised_states, (samples, seq_len)) return music_tokens, dequantised_states, commit_loss, dict(fit=fit, pn=prenorm, **update_metrics) class JukeboxBottleneck(nn.Module): def __init__(self, config, levels): super().__init__() self.levels = levels self.level_blocks = nn.ModuleList() for level in range(self.levels): self.level_blocks.append(JukeboxBottleneckBlock(config)) def encode(self, raw_audio): music_tokens = [ level_block.encode(hidden_states) for (level_block, hidden_states) in zip(self.level_blocks, raw_audio) ] return music_tokens def decode(self, music_tokens, start_level=0, end_level=None): if end_level is None: end_level = self.levels quantised_audio = [ level_block.decode(z) for (level_block, z) in zip(self.level_blocks[start_level:end_level], music_tokens) ] return quantised_audio def forward(self, input_audio): music_tokens, quantised_states, commit_losses, metrics = [], [], [], [] for level in range(self.levels): level_block = self.level_blocks[-level - 1] hidden_states = input_audio[level] sampled_tokens, quantised_state, commit_loss, metric = level_block( hidden_states, update_codebook=self.training ) music_tokens.append(sampled_tokens) if not self.training: # Be extra paranoid and make sure the encoder weights can't # change 
from straight-through estimator quantised_state = quantised_state.detach() quantised_states.append(quantised_state) commit_losses.append(commit_loss) if self.training: metrics.append(metric) return music_tokens, quantised_states, commit_losses, metrics JUKEBOX_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (`JukeboxConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ @add_start_docstrings( """The Hierarchical VQ-VAE model used in Jukebox. This model follows the Hierarchical VQVAE paper from [Will Williams, Sam Ringer, Tom Ash, John Hughes, David MacLeod, Jamie Dougherty](https://arxiv.org/abs/2002.08111). """, JUKEBOX_START_DOCSTRING, ) class JukeboxVQVAE(PreTrainedModel): config_class = JukeboxVQVAEConfig base_model_prefix = "vqvae" def _init_weights(self, module): if isinstance(module, nn.Embedding): # embed_tokens module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) elif isinstance(module, JukeboxConv1D): if self.config.zero_out: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=0.02 * self.config.init_scale) elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: module.conv1d_2.weight.data.zero_() module.conv1d_2.bias.data.zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def __init__(self, config: JukeboxVQVAEConfig): super().__init__(config) downs_t = config.res_downs_t strides_t = config.res_strides_t if not config.sample_length: downsamples = [stride**down for stride, down in zip(strides_t, downs_t)] top_raw_to_tokens = np.prod(downsamples) config.sample_length = ( config.sample_length_in_seconds * config.sampling_rate // top_raw_to_tokens ) * top_raw_to_tokens config.sample_length = config.sample_length.astype(int) self.nb_discrete_codes = config.nb_discrete_codes self.commit = config.commit self.sample_length = config.sample_length self.downsamples = [stride**down for stride, down in zip(strides_t, downs_t)] self.hop_lengths = np.cumprod(self.downsamples) self.levels = levels = config.levels self.music_tokens_shapes = [ (int(self.sample_length // self.hop_lengths[-level - 1])) for level in range(levels) ] self.multipliers = config.multipliers if config.multipliers is not None else [1] * levels self.encoders = nn.ModuleList() self.decoders = nn.ModuleList() for level in range(levels): width = config.res_conv_width * self.multipliers[level] depth = config.res_conv_depth * self.multipliers[level] self.encoders.append( JukeboxEncoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1]) ) self.decoders.append( JukeboxDecoder(config, width, depth, level + 1, downs_t[: level + 1], strides_t[: level + 1]) ) self.bottleneck = JukeboxBottleneck(config, levels) def _decode(self, music_tokens, 
start_level=0, end_level=None): # Decode if end_level is None: end_level = self.levels latent_states = self.bottleneck.decode(music_tokens, start_level=start_level, end_level=end_level) # Use only lowest level decoder, dequantised_state = self.decoders[start_level], latent_states[0:1] dequantised_state = decoder(dequantised_state, all_levels=False) dequantised_state = dequantised_state.permute(0, 2, 1) return dequantised_state def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1) -> torch.Tensor: """ Transforms the input `music_tokens` to their `raw_audio` representation. Args: music_tokens (`torch.LongTensor`): Tensor of music tokens which will be decoded to raw audio by using the codebook. Each music token should be an index to a corresponding `code` vector in the codebook. start_level (`int`, *optional*): Level at which the decoding process will start. Default to 0. end_level (`int`, *optional*): Level at which the decoding process will start. Default to None. bs_chunks (int, *optional*): Number of chunks to process at the same time. """ token_chunks = [torch.chunk(token, bs_chunks, dim=0) for token in music_tokens] dequantised_states = [] for i in range(bs_chunks): music_tokens_i = [chunks[i] for chunks in token_chunks] dequantised_state = self._decode(music_tokens_i, start_level=start_level, end_level=end_level) dequantised_states.append(dequantised_state) return torch.cat(dequantised_states, dim=0) def _encode(self, raw_audio, start_level=0, end_level=None): # Encode if end_level is None: end_level = self.levels input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) music_tokens = self.bottleneck.encode(latent_states) return music_tokens[start_level:end_level] def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): """ Transforms the `input_audio` to a discrete representation made out of `music_tokens`. Args: input_audio (`torch.Tensor`): Raw audio which will be encoded to its discrete representation using the codebook. The closest `code` form the codebook will be computed for each sequence of samples. start_level (`int`, *optional*, defaults to 0): Level at which the encoding process will start. Default to 0. end_level (`int`, *optional*): Level at which the encoding process will start. Default to None. bs_chunks (int, *optional*, defaults to 1): Number of chunks of raw audio to process at the same time. """ audio_chunks = torch.chunk(input_audio, bs_chunks, dim=0) music_tokens_list = [] for chunk_i in audio_chunks: music_tokens_i = self._encode(chunk_i, start_level=start_level, end_level=end_level) music_tokens_list.append(music_tokens_i) music_tokens = [torch.cat(music_tokens_level, dim=0) for music_tokens_level in zip(*music_tokens_list)] return music_tokens def sample(self, n_samples): music_tokens = [ torch.randint(0, self.nb_discrete_codes, size=(n_samples, *music_tokens_shape), device="cpu") for music_tokens_shape in self.music_tokens_shapes ] return self.decode(music_tokens) def forward(self, raw_audio: torch.FloatTensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Forward pass of the VQ-VAE, encodes the `raw_audio` to latent states, which are then decoded for each level. The commit loss, which ensure that the encoder's computed embeddings are close to the codebook vectors, is computed. Args: raw_audio (`torch.FloatTensor`): Audio input which will be encoded and decoded. 
Returns: `Tuple[torch.Tensor, torch.Tensor]` Example: ```python >>> from transformers import JukeboxVQVAE, set_seed >>> import torch >>> model = JukeboxVQVAE.from_pretrained("openai/jukebox-1b-lyrics").eval() >>> set_seed(0) >>> zs = [torch.randint(100, (4, 1))] >>> model.decode(zs).shape torch.Size([4, 8, 1]) ``` """ # Encode/Decode input_audio = raw_audio.permute(0, 2, 1).float() latent_states = [] for level in range(self.levels): encoder = self.encoders[level] latent_state = encoder(input_audio) latent_states.append(latent_state[-1]) _, music_tokens, commit_losses, _ = self.bottleneck(latent_states) dequantised_states = [] for level in range(self.levels): decoder = self.decoders[level] dequantised_state = decoder(music_tokens[level : level + 1], all_levels=False) dequantised_states.append(dequantised_state.permute(0, 2, 1)) commit_loss = sum(commit_losses) loss = self.commit * commit_loss return dequantised_states, loss class JukeboxMLP(nn.Module): def __init__(self, config): # a single channel is always used in original code super().__init__() embed_dim = config.hidden_size hidden_dim = int(config.mlp_multiplier * embed_dim) self.c_fc = JukeboxConv1D(embed_dim, hidden_dim) self.c_proj = JukeboxConv1D(hidden_dim, embed_dim) self.act = ACT2FN[config.act_fn] self.dropout = nn.Dropout(config.resid_dropout) def forward(self, hidden_states): hidden_states = self.c_fc(hidden_states) hidden_states = self.act(hidden_states) hidden_states = self.c_proj(hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states class JukeboxLayerNorm(FusedLayerNorm): def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): super().__init__(normalized_shape, eps=eps, elementwise_affine=elementwise_affine) self.width = np.prod(normalized_shape) self.max_numel = 65535 * self.width def forward(self, input): if input.numel() > self.max_numel: return F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps).type_as(input) else: return super().forward(input).type_as(input) class JukeboxAttention(nn.Module): def __init__(self, config, n_ctx, attn_func="dense_attn"): super().__init__() self.embed_dim = config.hidden_size self.n_heads = config.n_heads self.dropout = config.attn_dropout hidden_dim = int(config.attention_multiplier * self.embed_dim) self.head_dim = hidden_dim // config.n_heads self.n_ctx = n_ctx self.hidden_dim = hidden_dim self.scale = self.head_dim**-0.25 self.mask = config.mask if attn_func == "cross_attention": self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim) self.c_enc_kv = JukeboxConv1D(self.embed_dim, hidden_dim * 2) else: self.c_attn = JukeboxConv1D(self.embed_dim, hidden_dim * 3) self.c_proj = JukeboxConv1D(hidden_dim, self.embed_dim) self.attn_dropout = nn.Dropout(config.attn_dropout) self.resid_dropout = nn.Dropout(config.resid_dropout) # Sequence of length seq_len is factored as [blocks, seq_len // blocks] self.attn_func = attn_func if attn_func == "cross_attention": self.qkv = self.decode_qkv elif attn_func == "prime_attn": self.qkv = self.prime_qkv else: self.qkv = self.factored_qkv ATTENTION_MAP = { "dense_attn": (self.dense_attn, "autoregressive"), "block_attn": (self.block_attn, "autoregressive"), "transpose_block_attn": (self.transpose_block_attn, "autoregressive"), "prev_block_attn": (self.prev_block_attn, None), "summary_attn": (self.summary_attn, "summary"), "summary_spread_attn": (self.summary_spread_attn, "summary"), "cross_attention": (self.dense_attn, None), "prime_attn": (self.prime_attn, "prime"), } self.attn, 
self.attn_mask = ATTENTION_MAP[attn_func] self.blocks = config.blocks self.spread = config.spread if self.blocks is not None: self.block_ctx = self.n_ctx // self.blocks self.sample_t = 0 self.cache = {} self.encoder_len = config.nb_relevant_lyric_tokens # length of the encoder input ids self.record_attn = False def _attn(self, query_states, key_states, value_states, sample): scale = self.scale if self.training: attention_weight = torch.matmul(query_states * scale, key_states * scale) else: attention_weight = torch.matmul(query_states, key_states) attention_weight.mul_(scale * scale) attn_weight_type = attention_weight.dtype attention_weight = attention_weight.float() if self.mask: # Generate appropriate mask to mask out all positions before current # Might take up lot of memory for dense, so can cache it mask = get_mask( self.attn_mask, query_states.size(-2), key_states.size(-1), self.blocks, self.spread, attention_weight.device, sample, self.sample_t, ) if mask is not None: attention_weight = attention_weight * mask + -1e9 * (1 - mask) attention_prob = F.softmax(attention_weight, dim=-1).type(attn_weight_type) if self.record_attn: self.attention_prob = attention_prob if self.attn_func == "prime_attn": # only keep music queries and lyrics keys/values self.attention_prob = self.attention_prob[:, :, self.encoder_len :, : self.encoder_len] attention_prob = self.attn_dropout(attention_prob) context_states = torch.matmul(attention_prob, value_states) return context_states def merge_heads(self, hidden_states): hidden_states = hidden_states.permute(0, 2, 1, 3).contiguous() new_hidden_states_shape = (*hidden_states.size()[:-2], hidden_states.size(-2) * hidden_states.size(-1)) return hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct merge_states def split_heads(self, hidden_states, is_key=False): new_hidden_states_shape = ( *hidden_states.size()[:-1], self.n_heads, hidden_states.size(-1) // self.n_heads, ) hidden_states = hidden_states.view(*new_hidden_states_shape) # in Tensorflow implem: fct split_states if is_key: return hidden_states.permute(0, 2, 3, 1) else: return hidden_states.permute(0, 2, 1, 3) def dense_attn(self, query, key, value, sample): query = self.split_heads(query) key = self.split_heads(key, is_key=True) value = self.split_heads(value) context_states = self._attn(query, key, value, sample) context_states = self.merge_heads(context_states) return context_states def block_attn(self, query, key, value, sample): block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) if query_length < seq_len: seq_len = query_length key = key[:, -seq_len:].contiguous() value = value[:, -seq_len:].contiguous() key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def transpose_block_attn(self, query, key, value, sample): block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: block_len = (seq_len - 1) % block_ctx key = key[:, block_len::block_ctx, :] value = value[:, block_len::block_ctx, :] return self.dense_attn(query, key, value, 
sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size, query_length // block_ctx, block_ctx, embed_dim) query = query.transpose(1, 2).contiguous() query = query.view(batch_size * block_ctx, query_length // block_ctx, embed_dim) key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) key = key.transpose(1, 2).contiguous() key = key.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim) value = value.transpose(1, 2).contiguous() value = value.view(batch_size * block_ctx, seq_len // block_ctx, embed_dim) block_attn = self.dense_attn(query, key, value, sample) block_attn = block_attn.view(batch_size, block_ctx, query_length // block_ctx, embed_dim) block_attn = block_attn.transpose(1, 2).contiguous() block_attn = block_attn.view(batch_size, query_length, embed_dim) return block_attn def prev_block_attn(self, query, key, value, sample): block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: block = (seq_len - 1) // block_ctx prev_l = (block - 1) * block_ctx if block > 0: key = key[:, prev_l : prev_l + block_ctx, :] value = value[:, prev_l : prev_l + block_ctx, :] else: key = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) value = torch.zeros(batch_size, block_ctx, embed_dim, device=query.device, dtype=query.dtype) return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: query_length = query.shape[1] query = query.view(batch_size * query_length // block_ctx, block_ctx, embed_dim) key = key.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)) key = key.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) value = value.view(batch_size, seq_len // block_ctx, block_ctx, embed_dim)[:, :-1, :, :] value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)) value = value.view(batch_size * seq_len // block_ctx, block_ctx, embed_dim) if query_length < seq_len: nb_query_blocks = query_length // block_ctx nb_key_blocks = seq_len // block_ctx seq_len = query_length key = key.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] key = key.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) value = value.view(batch_size, nb_key_blocks, block_ctx, embed_dim)[:, -nb_query_blocks:] value = value.contiguous().view(batch_size * nb_query_blocks, block_ctx, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def summary_attn(self, query, key, value, sample): blocks = self.blocks block_ctx = self.block_ctx batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: key = key[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :] key = torch.nn.functional.pad(key, (0, 0, 1, 0)) value = value[:, block_ctx - 1 : blocks * block_ctx - 1 : block_ctx, :] value = torch.nn.functional.pad(value, (0, 0, 1, 0)) return self.dense_attn(query, key, value, sample).view(batch_size, 1, embed_dim) else: key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] key = torch.nn.functional.pad(key, (0, 0, 1, 0)) # batch_size, blocks, embed_dim value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -1, :] value = torch.nn.functional.pad(value, (0, 0, 1, 0)) # batch_size, 
blocks, embed_dim return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def summary_spread_attn(self, query, key, value, sample): blocks = self.blocks spread = self.spread batch_size, seq_len, embed_dim = value.shape # For sample, query_len= 1, key_len = value_len = sample_t if sample: raise NotImplementedError else: key = key.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] key = torch.nn.functional.pad(key, (0, 0, 0, 0, 1, 0)).contiguous() key = key.view(batch_size, blocks * spread, embed_dim) value = value.view(batch_size, blocks, seq_len // blocks, embed_dim)[:, :-1, -spread:, :] value = torch.nn.functional.pad(value, (0, 0, 0, 0, 1, 0)).contiguous() value = value.view(batch_size, blocks * spread, embed_dim) return self.dense_attn(query, key, value, sample).view(batch_size, seq_len, embed_dim) def prime_attn(self, query, key, value, sample): encoder_len = self._encoder_len key = key[:, :encoder_len] value = value[:, :encoder_len] return self.dense_attn(query, key, value, sample) def factored_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] if last_encoder_hidden_states is not None: raise TypeError("last_encoder_hidden_states should be None") query, key, value = hidden_states.chunk(3, dim=2) if sample: self.sample_t += curr_ctx key, value = self._append_cache(key, value) l_cache = self._suff_cache_len() if self._cache_len() > l_cache: self._slice_cache(-l_cache) if curr_ctx > 1: if self.attn_func != "dense_attn": query = self._pad_to_block_ctx(query, query=True) key = self._pad_to_block_ctx(key) value = self._pad_to_block_ctx(value) sample = False else: key = self.cache["key"] value = self.cache["value"] return query, key, value, sample def prime_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] if last_encoder_hidden_states is not None: raise TypeError("last_encoder_hidden_states should be None") query, key, value = hidden_states.chunk(3, dim=2) if sample: if self._cache_len() < self._encoder_len: self._append_cache(key, value) if self._cache_len() > self._encoder_len: self._slice_cache(0, self._encoder_len) key, value = self.cache["key"], self.cache["value"] self.sample_t += curr_ctx return query, key, value, sample def decode_qkv(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] query = hidden_states if sample: if self.sample_t == 0: self.cache["key"], self.cache["value"] = self.c_enc_kv( last_encoder_hidden_states.type_as(hidden_states) ).chunk(2, dim=2) key, value = self.cache["key"], self.cache["value"] self.sample_t += curr_ctx else: key, value = self.c_enc_kv(last_encoder_hidden_states.type_as(hidden_states)).chunk(2, dim=2) return query, key, value, sample def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): curr_ctx = hidden_states.shape[1] hidden_states = self.c_attn(hidden_states) query, key, value, sample = self.qkv( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample ) attention_scores = self.attn(query, key, value, sample) if attention_scores.shape[1] != curr_ctx: offset = self._offset(curr_ctx) attention_scores = attention_scores[:, offset : offset + curr_ctx, :].contiguous() attention_scores = self.c_proj(attention_scores) return self.resid_dropout(attention_scores) @property def _encoder_len(self): encoder_len = self.encoder_len encoder_blocks = (encoder_len // self.blocks) + 1 return 
encoder_blocks * self.blocks def _offset(self, curr_ctx): if self.attn_func == "dense_attn": return 0 return (self.sample_t - curr_ctx) % self.block_ctx def _pad_to_block_ctx(self, hidden_states, query=False): seq_len = hidden_states.shape[1] offset = self._offset(seq_len) if query else 0 n_blocks = (seq_len + offset + self.block_ctx - 1) // self.block_ctx pad = n_blocks * self.block_ctx - seq_len - offset if pad == 0 and offset == 0: return hidden_states else: return F.pad(hidden_states, (0, 0, offset, pad)) def _cache_len(self): return 0 if "key" not in self.cache else self.cache["key"].shape[1] def _suff_cache_len(self): """ Precondition: key and value are appended with the current context and self.sample_t reflects the 1-indexed sample location in the context. """ previous_block_length = (self.sample_t - 1) % self.block_ctx + 1 + self.block_ctx REQUIRED_CACHE_LEN = { "dense_attn": self.sample_t, "block_attn": (self.sample_t - 1) % self.block_ctx + 1, "transpose_block_attn": self.sample_t, "prev_block_attn": self.sample_t if self.sample_t <= self.block_ctx else previous_block_length, "cross_attn": self.encoder_len, "prime_attn": min(self.sample_t, self._encoder_len), } return REQUIRED_CACHE_LEN[self.attn_func] def _slice_cache(self, start, end=None): self.cache["key"] = self.cache["key"][:, start:end] self.cache["value"] = self.cache["value"][:, start:end] def _append_cache(self, key, value): if "key" not in self.cache: self.cache["key"] = key self.cache["value"] = value else: old_key, old_value = key, value key = torch.cat([self.cache["key"], old_key], dim=1) value = torch.cat([self.cache["value"], old_value], dim=1) del self.cache["key"] del self.cache["value"] del old_key del old_value self.cache["key"] = key self.cache["value"] = value return self.cache["key"], self.cache["value"] def del_cache(self): self.sample_t = 0 if "key" in self.cache: del self.cache["key"] if "value" in self.cache: del self.cache["value"] self.cache = {} class JukeboxBlock(nn.Module): def __init__(self, config, n_ctx, attn_func="dense_attn"): super().__init__() self.width = config.hidden_size self.attn = JukeboxAttention(config, n_ctx, attn_func=attn_func) self.layer_norm_0 = JukeboxLayerNorm(config.hidden_size) self.mlp = JukeboxMLP(config) self.layer_norm_1 = JukeboxLayerNorm(config.hidden_size) self.res_scale = 1.0 / config.num_layers if config.attn_res_scale else 1.0 self.attn_func = attn_func def forward(self, hidden_states, last_encoder_hidden_states, sample=False): residuals = hidden_states hidden_states = self.layer_norm_0(hidden_states) hidden_states = self.attn(hidden_states, last_encoder_hidden_states, sample) output_states = self.layer_norm_1(residuals + hidden_states) output_states = self.mlp(output_states) if self.res_scale == 1.0: output = residuals + hidden_states + output_states else: output = residuals + self.res_scale * (hidden_states + output_states) return output class JukeboxLayerStack(nn.Module): def __init__(self, config, n_ctx): super().__init__() self.n_ctx = n_ctx self.width = config.hidden_size self.num_layers = config.num_layers self.blocks = config.blocks self.attention_pattern = config.attention_pattern if self.blocks is not None: self.block_ctx = n_ctx // self.blocks self.encoder_len = config.nb_relevant_lyric_tokens self.n_heads = config.n_heads # Orders of attn_func attention_pattern = ATTENTION_PATTERNS[self.attention_pattern] self._attn_mods = nn.ModuleList() for depth in range(self.num_layers): self._attn_mods.append(JukeboxBlock(config, n_ctx, 
attn_func=attention_pattern(depth))) self.saved_attn_weights = [] def set_record_attn(self, record_attn): """ Makes forward prop dump self-attention softmaxes to self.saved_attn_weights. Args: record_attn (`Union[bool,set]`): Either a set of layer indices indicating which layers to store, or a boolean value indicating whether to dump all. """ def _should_record_attn(layer_idx): if isinstance(record_attn, bool): return record_attn return layer_idx in record_attn for i, layer in enumerate(self._attn_mods): layer.attn.record_attn = _should_record_attn(i) if not record_attn: self.saved_attn_weights = [] def forward(self, hidden_states, last_encoder_hidden_states=None, sample=False): # Blocks for i, attn_layer in enumerate(self._attn_mods): if attn_layer.attn_func == "cross_attention": # attend to the lyrics hidden_states = attn_layer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=sample ) else: hidden_states = attn_layer(hidden_states, last_encoder_hidden_states=None, sample=sample) if attn_layer.attn.record_attn: self.saved_attn_weights.append(attn_layer.attn.c_attn.weight) return hidden_states def del_cache(self): for attn_layer in self._attn_mods: attn_layer.attn.del_cache() class JukeboxPositionalEmbedding(nn.Module): def __init__(self, embed_dim, width): super().__init__() self.pos_emb = nn.Parameter(torch.empty((embed_dim, width))) def forward(self): pos_emb = self.pos_emb return pos_emb class JukeboxConditionalAutoregressive(nn.Module): def __init__( self, config, n_ctx=None, embed_dim=None, audio_conditioning=False, metadata_conditioning=False, is_encoder=False, ): """ Autoregressive model on either lyric tokens or music tokens, or both. The attention pattern should be properly set for each configuration. Args: config (`JukeboxPriorConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. n_ctx (`int`, *optional*): Number of music or lyric tokens provided in a single pass. embed_dim (`int`, *optional*): Either equal to the dimension of the codebook, or the sum of n_vocab (lyrics) and the codebook dimension if the model combines lyrics and music tokens, or simply n_vocab if the model is a separate encoder audio_conditioning (`bool`, *optional*, defaults to `False`): Whether or not the prior supports conditioning on audio. metadata_conditioning (`bool`, *optional*, defaults to `False`): Whether or not the prior supports conditioning on artist, genres, lyrics and timing. is_encoder (`bool`, *optional*, defaults to `False`): Whether the model is an encoder-only model.
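Example (a hedged illustration of the merged vocabulary -- the sizes below are made up and are not taken from a released checkpoint):

```python
>>> lyric_vocab_size, music_vocab_size = 80, 2048  # hypothetical sizes
>>> embed_dim = lyric_vocab_size + music_vocab_size  # one embedding table covers both token types
>>> embed_dim
2128
```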
""" super().__init__() self.width = config.hidden_size self.num_layers = config.num_layers self.n_ctx = n_ctx if n_ctx is not None else config.n_ctx self.embed_dim = embed_dim if embed_dim is not None else config.music_vocab_size self.embed_tokens = nn.Embedding(self.embed_dim, config.hidden_size) self.embed_tokens_dropout = nn.Dropout(config.emb_dropout) self.metadata_conditioning = metadata_conditioning self.audio_conditioning = audio_conditioning if not metadata_conditioning: self.start_token = nn.Parameter(torch.empty((1, config.hidden_size))) self.pos_emb = JukeboxPositionalEmbedding(self.n_ctx, config.hidden_size) self.pos_emb_dropout = nn.Dropout(config.emb_dropout) self.transformer = JukeboxLayerStack(config, n_ctx=self.n_ctx) self.is_encoder = is_encoder self.encoder_len = config.nb_relevant_lyric_tokens if config.merged_decoder: # Merged piped model uses this setup self.add_cond_after_transformer = False self.share_embed_tokens_fc_proj_out = False else: self.add_cond_after_transformer = True self.share_embed_tokens_fc_proj_out = True if not is_encoder: self.fc_proj_out = nn.Linear(config.hidden_size, self.embed_dim, bias=False) if self.share_embed_tokens_fc_proj_out: self.fc_proj_out.weight = self.embed_tokens.weight self.loss = torch.nn.CrossEntropyLoss() def forward( self, tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, get_preds=False, get_acts=False, get_sep_loss=False, ): """ Args: tokens (`torch.tensor`): Can represent music tokens, lyrics tokens or both, depending on the configuration. """ # Preprocess. batch_size = tokens.shape[0] with torch.no_grad(): tokens = tokens.view(batch_size, -1).long() if not self.audio_conditioning: audio_conditioning = torch.zeros( (batch_size, 1, self.width), device=tokens.device, dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype, ) target = tokens # Target hidden_states = self.embed_tokens(tokens) # Shift by 1, and fill in start token hidden_states = torch.cat((hidden_states[:, -1:], hidden_states[:, :-1]), dim=1) if self.metadata_conditioning: hidden_states[:, 0] = metadata_conditioning.view(batch_size, self.width) else: hidden_states[:, 0] = self.start_token hidden_states = ( self.embed_tokens_dropout(hidden_states) + self.pos_emb_dropout(self.pos_emb()) + audio_conditioning ) # Pos emb and dropout hidden_states = self.transformer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states ) # Transformer if self.add_cond_after_transformer: # Piped doesnt add x_cond hidden_states = hidden_states + audio_conditioning activations = hidden_states if self.is_encoder: return hidden_states hidden_states = self.fc_proj_out(hidden_states) # Predictions loss_fn = nn.CrossEntropyLoss() if get_sep_loss: lyric_hidden_states = hidden_states[:, : self.encoder_len].reshape(-1, self.embed_dim) token_hidden_states = hidden_states[:, self.encoder_len :].reshape(-1, self.embed_dim) lyric_loss = loss_fn(lyric_hidden_states, target[:, : self.encoder_len].reshape(-1)) / np.log(2.0) music_token_loss = loss_fn(token_hidden_states, target[:, self.encoder_len :].reshape(-1)) / np.log(2.0) loss = (lyric_loss, music_token_loss) # Note order! 
Lyric is first else: loss = loss_fn(hidden_states.view(-1, self.embed_dim), target.view(-1)) / np.log(2.0) # Loss if get_preds: return loss, hidden_states elif get_acts: return loss, activations else: return loss, None def get_emb(self, sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning): if sample_t == 0: hidden_states = torch.empty(n_samples, 1, self.width, dtype=self.embed_tokens.weight.dtype).to( self.embed_tokens.weight.device ) if self.metadata_conditioning: hidden_states[:, 0] = metadata_conditioning.view(n_samples, self.width) else: hidden_states[:, 0] = self.start_token else: hidden_states = self.embed_tokens(tokens) if audio_conditioning.shape == (n_samples, self.n_ctx, self.width): cond = audio_conditioning[:, sample_t : sample_t + 1, :] else: cond = audio_conditioning # Pos emb, dropout is identity at eval time hidden_states = hidden_states + self.pos_emb()[sample_t : sample_t + 1] + cond return hidden_states, cond def sample( self, n_samples, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, sample_tokens=None, ): if sample_tokens is None: sample_tokens = self.n_ctx if not self.audio_conditioning: audio_conditioning = torch.zeros( (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype ).to(self.fc_proj_out.device) with torch.no_grad(): sampled_tokens = [] tokens = None if get_preds: preds = [] iter = tqdm(range(0, sample_tokens), leave=False) for sample_t in iter: iter.set_description(f"Ancestral sampling {sample_tokens} music tokens", refresh=True) hidden_states, cond = self.get_emb( sample_t, n_samples, tokens, audio_conditioning, metadata_conditioning ) hidden_states = self.transformer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True ) if self.add_cond_after_transformer: hidden_states = hidden_states + cond hidden_states = self.fc_proj_out(hidden_states) # Predictions if get_preds: preds.append(hidden_states.clone()) # Adjust logits hidden_states = hidden_states / temp hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) # Sample and replace hidden_states tokens = torch.distributions.Categorical(logits=hidden_states).sample() sampled_tokens.append(tokens.clone()) del tokens self.transformer.del_cache() tokens = torch.cat(sampled_tokens, dim=1) if get_preds: preds = torch.cat(preds, dim=1) if get_preds: return tokens, preds else: return tokens def split_chunks(self, length, chunk_size): n_passes = (length + chunk_size - 1) // chunk_size chunk_sizes = [*[chunk_size] * (n_passes - 1), (length - 1) % chunk_size + 1] return chunk_sizes def primed_sample( self, n_samples, lyric_and_music_tokens, audio_conditioning=None, metadata_conditioning=None, last_encoder_hidden_states=None, temp=1.0, top_k=0, top_p=0.0, get_preds=False, chunk_size=None, sample_tokens=None, ): if sample_tokens is None: sample_tokens = self.n_ctx # Preprocess. batch_size = lyric_and_music_tokens.shape[0] with torch.no_grad(): lyric_and_music_tokens = lyric_and_music_tokens.view(batch_size, -1).long() sampled_audio = torch.split(lyric_and_music_tokens, 1, dim=1) sampled_audio = list(sampled_audio) if not self.audio_conditioning: audio_conditioning = torch.zeros( (n_samples, 1, self.width), dtype=self.transformer._attn_mods[0].mlp.c_fc.weight.dtype ).to(lyric_and_music_tokens.device) with torch.no_grad(): if get_preds: preds = [] # Fill up key/value cache for past context by runing forward pass. 
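# (Hedged illustration of the chunked priming described just below -- the numbers are arbitrary:
# with len(sampled_audio) == 10 and chunk_size == 4, split_chunks(10, 4) returns [4, 4, 2],
# so the key/value cache is primed in three forward passes rather than one.)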
# We do so in chunks instead of doing the whole past in one forward pass to reduce max memory usage. if chunk_size is None: chunk_size = len(sampled_audio) chunk_sizes = self.split_chunks(len(sampled_audio), chunk_size) x_primes = [] start = 0 token = None for current_chunk_size in tqdm(chunk_sizes, desc="Preparing past key value", leave=False): sampled_audio_prime, conds_prime = [], [] for sample_t in range(start, start + current_chunk_size): x_prime, cond_prime = self.get_emb( sample_t, n_samples, token, audio_conditioning, metadata_conditioning ) token = sampled_audio[sample_t] sampled_audio_prime.append(x_prime) conds_prime.append(cond_prime) start = start + current_chunk_size x_prime, cond_prime = torch.cat(sampled_audio_prime, dim=1), torch.cat(conds_prime, dim=1) del sampled_audio_prime del conds_prime if not get_preds: del cond_prime x_prime = self.transformer(x_prime, last_encoder_hidden_states=last_encoder_hidden_states, sample=True) if get_preds: if self.add_cond_after_transformer: x_prime = x_prime + cond_prime del cond_prime x_primes.append(x_prime) else: del x_prime if get_preds: x_prime = torch.cat(x_primes, dim=1) x_prime = self.fc_proj_out(x_prime) # Predictions preds.append(x_prime) # the input of the encoder and decoder can be merged into (lyrics, music tokens) input_tokens = sampled_audio[-1] itererator = tqdm( range(len(sampled_audio), sample_tokens), desc=f"Sampling {len(range(len(sampled_audio), sample_tokens))} music tokens", leave=False, ) for sample_t in itererator: hidden_states, cond = self.get_emb( sample_t, n_samples, input_tokens, audio_conditioning, metadata_conditioning ) hidden_states = self.transformer( hidden_states, last_encoder_hidden_states=last_encoder_hidden_states, sample=True ) if self.add_cond_after_transformer: hidden_states = hidden_states + cond hidden_states = self.fc_proj_out(hidden_states) # Predictions if get_preds: preds.append(hidden_states) # Adjust logits hidden_states = hidden_states / temp hidden_states = filter_logits(hidden_states, top_k=top_k, top_p=top_p) # only music tokens are sampled music_tokens = torch.distributions.Categorical(logits=hidden_states).sample() sampled_audio.append(music_tokens.clone()) input_tokens = music_tokens del input_tokens, music_tokens self.transformer.del_cache() music_tokens = torch.cat(sampled_audio, dim=1) if get_preds: preds = torch.cat(preds, dim=1) if get_preds: return music_tokens, preds else: return music_tokens class JukeboxMusicTokenConditioner(nn.Module): """ The `JukeboxMusicTokenConditioner` takes music tokens as an input (coresponding to the codes of the VQVAE's codebook) and upsamples it using a single layer of decoder convolution block (the same is used in the VQVAE). 
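A hedged note on the upsampling factor (the per-level values below are illustrative, not taken from a released config): the decoder conv block stretches the token sequence by `stride**downs` for the given level, mirroring the `downsamples` computation in `JukeboxPrior`:

```python
>>> res_downs_t, res_strides_t = 5, 2  # hypothetical values for one level
>>> res_strides_t**res_downs_t  # upsampling factor applied by the decoder conv block
32
```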
""" def __init__(self, config, level): super().__init__() self.embed_tokens = nn.Embedding(config.music_vocab_size, config.hidden_size) config.embed_dim = config.music_vocab_size # setting correct argument for the `JukeboxDecoder` self.upsampler = JukeboxDecoderConvBock( config, config.hidden_size, config.res_conv_width, config.res_conv_depth, config.res_downs_t[level], config.res_strides_t[level], reverse_dilation=False, ) self.layer_norm = JukeboxLayerNorm(config.hidden_size) def forward(self, music_tokens, raw_audio_conditionning=None): """ Args: music_tokens (`torch.LongTensor`): Music tokens form the uper level in range(nb_discrete_codes) raw_audio_conditionning (`torch.LongTensor`, *optional*): Audio used when primed sampling, raw audio information that conditions the generation """ if raw_audio_conditionning is None: raw_audio_conditionning = 0.0 # Embed music_tokens music_tokens = music_tokens.long() hidden_states = self.embed_tokens(music_tokens) hidden_states = hidden_states + raw_audio_conditionning # Run conditioner hidden_states = hidden_states.permute(0, 2, 1) hidden_states = self.upsampler(hidden_states) hidden_states = hidden_states.permute(0, 2, 1) hidden_states = self.layer_norm(hidden_states) return hidden_states class JukeboxRangeEmbedding(nn.Module): """ The `JukeboxRangeEmbedding` interpolate the given [pos_start, pos_end] to obtain an equivalent of time positional embedding of length `n_ctx`. Binning process : For each pos in position tensor, find its bin [start,end) mapped to [0,1,...,bins-1] [start,end) -> [0,1) -> [0, bins) -> floor -> [0,...,bins-1] NOTE: Open ended interval on right, so start <= pos < end, not <= end """ def __init__(self, n_time, embed_dim, range, out_width, clamp=False): super().__init__() self.n_time = n_time self.embed_dim = embed_dim self.emb = nn.Embedding(embed_dim, out_width) self.pos_min, self.pos_max = range self.clamp = clamp def forward(self, pos_start, pos_end=None): # Check if [pos_start,pos_end] in [pos_min, pos_max) if not len(pos_start.shape) == 2: raise TypeError(f"Expected shape with 2 dims, got {pos_start.shape}") if not (self.pos_min <= pos_start).all() and (pos_start < self.pos_max).all(): raise TypeError(f"Range is [{self.pos_min},{self.pos_max}), got {pos_start}") pos_start = pos_start.float() if pos_end is not None: if self.clamp: pos_end = pos_end.clamp(self.pos_min, self.pos_max) pos_end = pos_end.float() # Interpolate so that [pos_start, ..., pos_end] <-> position tensor of length n_ctx n_time = self.n_time if n_time != 1: interpolation = ( torch.arange(0, n_time, dtype=torch.float, device=pos_start.device).view(1, n_time) / n_time ) position = pos_start + (pos_end - pos_start) * interpolation else: position = pos_start # Bin each value to bins_ # [0,1) -> [0,1..,embed_dim) -> [0,1...,embed_dim-1 normalised_position = (position - self.pos_min) / (self.pos_max - self.pos_min) bins_ = (self.embed_dim * normalised_position).floor().long().detach() return self.emb(bins_) class JukeboxLabelConditioner(nn.Module): def __init__(self, config, include_time_signal): super().__init__() embed_dim = config.hidden_size timing_dims = config.timing_dims sampling_rate = config.sampling_rate nb_genres, nb_artists = config.metadata_dims music_tokens_shape = config.n_ctx self.max_nb_genres = config.max_nb_genres self.bow_genre_emb = nn.Embedding(nb_genres, embed_dim) self.artist_emb = nn.Embedding(nb_artists, embed_dim) self.include_time_signal = include_time_signal if self.include_time_signal: total_length_range = (config.min_duration * 
sampling_rate, config.max_duration * sampling_rate) absolute_pos_range = (0.0, config.max_duration * sampling_rate) relative_pos_range = (0.0, 1.0) self.total_length_emb = JukeboxRangeEmbedding(1, timing_dims, total_length_range, embed_dim) self.absolute_pos_emb = JukeboxRangeEmbedding( music_tokens_shape, timing_dims, absolute_pos_range, embed_dim ) self.relative_pos_emb = JukeboxRangeEmbedding( music_tokens_shape, timing_dims, relative_pos_range, embed_dim, clamp=True ) def forward(self, metadata): total_length = metadata[:, 0:1] offset = metadata[:, 1:2] length = metadata[:, 2:3] artist = metadata[:, 3:4] genre = metadata[:, 4:] # Start embedding of length 1 artist_emb = self.artist_emb(artist) # Empty genre slots are denoted by -1. We mask these out. mask = (genre >= 0).float().unsqueeze(2) genre_emb = (self.bow_genre_emb(genre.clamp(0)) * mask).sum(dim=1, keepdim=True) start_emb = genre_emb + artist_emb # Pos embedding of length n_ctx if self.include_time_signal: start, end = offset, offset + length total_length = total_length.float() start = start.float() end = end.float() pos_emb = ( self.total_length_emb(total_length) + self.absolute_pos_emb(start, end) + self.relative_pos_emb(start / total_length, end / total_length) ) else: pos_emb = None return start_emb, pos_emb class JukeboxPrior(PreTrainedModel): """ The JukeboxPrior class, which is a wrapper around the various conditioning modules and the transformer. JukeboxPrior models can be seen as language models trained on music. They model the next `music token` prediction task. If a (lyric) `encoder` is defined, they also model the `next character` prediction on the lyrics. They can be conditioned on timing, artist, genre, lyrics and codes from lower-level priors. Args: config (`JukeboxPriorConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. level (`int`, *optional*): Current level of the Prior. Should be in range `[0, nb_priors)`. nb_priors (`int`, *optional*, defaults to 3): Total number of priors. vqvae_encoder (`Callable`, *optional*): Encoding method of the VQVAE encoder used in the forward pass of the model. Passing functions instead of the vqvae module to avoid getting the parameters. vqvae_decoder (`Callable`, *optional*): Decoding method of the VQVAE decoder used in the forward pass of the model. Passing functions instead of the vqvae module to avoid getting the parameters.
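Example (a minimal construction sketch -- instantiating a standalone prior from a default config like this is only illustrative; the released checkpoints are normally loaded through `JukeboxModel.from_pretrained` instead):

```python
>>> from transformers.models.deprecated.jukebox.configuration_jukebox import JukeboxPriorConfig
>>> from transformers.models.deprecated.jukebox.modeling_jukebox import JukeboxPrior

>>> config = JukeboxPriorConfig()  # default hyper-parameters
>>> prior = JukeboxPrior(config, level=0, nb_priors=3)  # top-level prior, no VQ-VAE hooked in
```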
""" config_class = JukeboxPriorConfig def _init_weights(self, module): init_scale = self.config.init_scale if isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxConv1D): if self.config.zero_out: module.weight.data.zero_() else: module.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxPositionalEmbedding): module.pos_emb.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxRangeEmbedding): module.emb.weight.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "lm_head"): module.lm_head.weight.data.normal_(mean=0.0, std=0.02 * init_scale) elif isinstance(module, JukeboxConditionalAutoregressive) and hasattr(module, "start_token"): module.start_token.data.normal_(mean=0.0, std=0.01 * init_scale) elif isinstance(module, JukeboxResConv1DBlock) and self.config.zero_out: module.conv1d_2.weigth.data.zero_() module.conv1d_2.bias.data.zero_() if isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() def __init__(self, config: JukeboxPriorConfig, level=None, nb_priors=3, vqvae_encoder=None, vqvae_decoder=None): super().__init__(config) # Passing functions instead of the vqvae module to avoid getting params, only used in the # forward loop self.vqvae_encoder = vqvae_encoder self.vqvae_decoder = vqvae_decoder self.levels = nb_priors self.level = level if level is not None else config.level self.base_model_prefix = f"priors.{self.level}" self.n_ctx = config.n_ctx self.lyric_conditioning = config.nb_relevant_lyric_tokens > 0 self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens self.encoder_loss_fraction = config.encoder_loss_fraction # Audio conditioning : conditioning on music tokens (either from audio or from previous levels or both) self.audio_conditioning = self.level != 0 self.cond_level = self.level - 1 if self.audio_conditioning: self.conditioner_blocks = JukeboxMusicTokenConditioner(config, self.level) # metadata conditioning : contioning on timing, genres, and artist self.metadata_conditioning = config.metadata_conditioning if self.metadata_conditioning: self.metadata_embedding = JukeboxLabelConditioner(config, include_time_signal=not self.audio_conditioning) # define encoder-decoder or encoder and decoder self.is_encoder_decoder = config.is_encoder_decoder if config.is_encoder_decoder: # encoder-decoder transformer self.input_shapes = [config.nb_relevant_lyric_tokens, config.n_ctx] self.embed_dim_shift = [0, config.lyric_vocab_size] self.width = config.hidden_size self.nb_relevant_lyric_tokens = config.nb_relevant_lyric_tokens self.prior = JukeboxConditionalAutoregressive( config, n_ctx=config.nb_relevant_lyric_tokens + config.n_ctx, embed_dim=config.lyric_vocab_size + config.music_vocab_size, audio_conditioning=(self.audio_conditioning or self.metadata_conditioning), metadata_conditioning=True, ) else: # Separate encoder-decoder transformer encoder_config = config.encoder_config if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: self.lyric_acts_width = encoder_config.hidden_size self.encoder_width = config.hidden_size self.encoder_dim = config.lyric_vocab_size self.encoder = JukeboxConditionalAutoregressive( encoder_config, n_ctx=self.nb_relevant_lyric_tokens, embed_dim=self.encoder_dim, audio_conditioning=False, metadata_conditioning=False, 
is_encoder=True, ) self.encoder.proj_in = JukeboxConv1D(encoder_config.hidden_size, config.hidden_size) self.encoder.final_layer_norm = JukeboxLayerNorm(config.hidden_size) self.encoder.lm_head = nn.Linear(config.hidden_size, config.lyric_vocab_size, bias=False) else: self.nb_relevant_lyric_tokens = 0 # decoder model on the tokens self.prior = JukeboxConditionalAutoregressive( config, audio_conditioning=(self.audio_conditioning or self.metadata_conditioning), metadata_conditioning=self.metadata_conditioning, ) self.next_token_prediction_loss_dims = config.n_ctx self.total_loss_dims = self.nb_relevant_lyric_tokens + self.next_token_prediction_loss_dims self.downsamples = [stride**down for stride, down in zip(config.res_strides_t, config.res_downs_t)] self.cond_downsample = self.downsamples[self.level] if self.level != 0 else None self.raw_to_tokens = np.prod(self.downsamples[: nb_priors - self.level]) self.sample_length = self.n_ctx * self.raw_to_tokens logger.info( f"Level:{self.level}, Cond downsample:{self.cond_downsample}, Raw to tokens:{self.raw_to_tokens}, Sample" f" length:{self.sample_length}" ) def get_metadata(self, labels, start, total_length, offset, get_indices=False): metadata = labels.clone() metadata[:, 0] = total_length # Set sample_length to match this level metadata[:, 2] = int(self.sample_length) # Set offset metadata[:, 1:2] = int(offset * self.raw_to_tokens) + int(start * self.raw_to_tokens) # here since metadata has the full token_list, we just need to select the ones that are relevant # Set lyric tokens metadata, indices = self.set_metadata_lyric_tokens(metadata) if get_indices: return metadata, indices else: return metadata def set_metadata_lyric_tokens(self, labels): """ Processes the full labels to only retrieve the relevant lyric tokens and keep the metadata conditioning tokens. """ if self.nb_relevant_lyric_tokens > 0: tokens_list = torch.zeros( (labels.shape[0], self.nb_relevant_lyric_tokens), dtype=torch.long, device=labels.device ) indices_list = [] # what's the index of each current character in the original array for idx in range(labels.shape[0]): full_tokens = labels.clone()[:, 4 + self.metadata_embedding.max_nb_genres :] total_length, offset, duration = labels[idx, 0], labels[idx, 1], labels[idx, 2] tokens, indices = get_relevant_lyric_tokens( full_tokens, self.nb_relevant_lyric_tokens, total_length, offset, duration ) tokens_list[idx, :] = tokens indices_list.append(indices) return ( torch.cat((labels[:, : 4 + self.metadata_embedding.max_nb_genres], tokens_list), dim=-1), indices_list, ) else: return labels, None def get_music_tokens_conds(self, music_tokens, start, end): """ Extracts current level's conditioning music tokens. """ if self.level != 0: music_tokens_cond = music_tokens[self.level - 1] music_tokens = music_tokens_cond[:, start // self.cond_downsample : end // self.cond_downsample] missing_cond_len = self.n_ctx // self.cond_downsample - music_tokens_cond[-1].shape[-1] if missing_cond_len > 0: init_cond = torch.zeros(1, missing_cond_len).to(music_tokens_cond.device) music_tokens_cond = torch.cat((music_tokens_cond, init_cond), dim=-1).long() music_tokens_conds = [music_tokens_cond] else: music_tokens_conds = None return music_tokens_conds def prior_preprocess(self, tokens, conds): """ Shifts the input tokens to account for the dictionary merge. `embed_dim_shift` gives the amount by which the music tokens should be shifted. It is equal to `lyric_vocab_size`.
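A tiny worked example (hedged -- the vocabulary size below is made up): with `lyric_vocab_size = 80`, `embed_dim_shift` is `[0, 80]`, so lyric token `5` stays `5` while music token `5` becomes `85` in the merged vocabulary.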
""" batch_size = tokens[0].shape[0] for i in range(len(tokens)): tokens[i] = (tokens[i] + int(self.embed_dim_shift[i])).view(batch_size, -1) for i in range(len(conds)): if conds[i] is None: conds[i] = torch.zeros( (batch_size, self.input_shapes[i], self.width), dtype=tokens[0].dtype, device=tokens[0].device ) return torch.cat(tokens, dim=1), torch.cat(conds, dim=1) def prior_postprocess(self, tokens): """ Shifts back the input tokens if the model uses an encoder decoder architecture. As the embedding layer is shared, `prior_embed_dim_shift` shifts the music token ids by `lyric_vocab_size`. Only returns the music tokens. """ batch_size = tokens.shape[0] dims = (self.input_shapes[0], tokens.shape[1] - self.input_shapes[0]) tokens = list(torch.split(tokens, dims, dim=1)) # Some of the input tokens might be shifted to take into account the voccabulary fusion for i in range(len(tokens)): bins_shift = int(self.embed_dim_shift[i]) tokens[i] = (tokens[i] - bins_shift).view(batch_size, -1) tokens[i] = torch.clamp(tokens[i], min=0) # If not masking loss, model may have generated lyric/midi tokens which are now shifted <0 by bin_shift return tokens[-1] def embed_tokens(self, music_tokens_conds): """ Embeds the upper level music tokens and upsamples them to provide as audio conditioning. """ music_tokens_conds = music_tokens_conds[: self.cond_level + 1] audio_conditioning = None for music_tokens_cond, conditioner_block in reversed(list(zip(music_tokens_conds, [self.conditioner_blocks]))): audio_conditioning = conditioner_block(music_tokens_cond, audio_conditioning) return audio_conditioning def encode(self, hidden_states, start_level=None, end_level=None, bs_chunks=1): """ Encodes the hidden states (raw audio) using the VQVAE's encoder. Returns latent_states. """ if start_level is None: start_level = self.level if end_level is None: end_level = self.levels # Get latents with torch.no_grad(): latent_states = self.vqvae_encoder( hidden_states, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks ) return latent_states def decode(self, music_tokens, start_level=None, end_level=None, bs_chunks=1): """ Usamples the sequence of codebook vectors to a raw audio. """ if start_level is None: start_level = self.level if end_level is None: end_level = self.levels with torch.no_grad(): output = self.vqvae_decoder( music_tokens, start_level=start_level, end_level=end_level, bs_chunks=bs_chunks ) return output def get_cond(self, music_tokens_conds, metadata): """ Converts the input tokens to input_embeddings. Splits the lyrics form the rest of the metadata. Lyric tokens can be None. """ if metadata is not None: n_labels = metadata.shape[1] - self.nb_relevant_lyric_tokens metadata, lyric_tokens = metadata[:, :n_labels], metadata[:, n_labels:] else: metadata, lyric_tokens = None, None metadata_conditioning, metadata_pos = ( self.metadata_embedding(metadata) if self.metadata_conditioning else (None, None) ) audio_conditioning = self.embed_tokens(music_tokens_conds) if self.audio_conditioning else metadata_pos return audio_conditioning, metadata_conditioning, lyric_tokens def sample( self, n_samples, music_tokens=None, music_tokens_conds=None, metadata=None, temp=1.0, top_k=0, top_p=0.0, chunk_size=None, sample_tokens=None, ): """ Ancestral/Prime sampling a window of tokens using the provided conditioning and metadatas. Args: n_samples (`int`): Number of samples to generate. music_tokens (`List[torch.LongTensor]`, *optional*): Previously gemerated tokens at the current level. 
Used as context for the generation. music_tokens_conds (`List[torch.FloatTensor]`, *optional*): Upper-level music tokens generated by the previous prior model. Is `None` if the generation is not conditioned on the upper-level tokens. metadata (`List[torch.LongTensor]`, *optional*): List containing the metadata tensor with the artist, genre and the lyric tokens. temp (`float`, *optional*, defaults to 1.0): Sampling temperature. top_k (`int`, *optional*, defaults to 0): Top k probabilities used for filtering. top_p (`float`, *optional*, defaults to 0.0): Top p probabilities used for filtering. chunk_size (`int`, *optional*): Size of the chunks used to prepare the cache of the transformer. sample_tokens (`int`, *optional*): Number of tokens to sample. """ no_past_context = music_tokens is None or music_tokens.shape[1] == 0 name = {True: "Ancestral", False: "Primed"}[no_past_context] logger.info(f"{name} sampling {n_samples} samples with temp={temp}, top_k={top_k}, top_p={top_p}") with torch.no_grad(): # Currently audio_conditioning only uses the immediately above layer audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata) if self.is_encoder_decoder: if no_past_context: # the primed_sample function will be used with music_tokens set to None lyric_and_music_tokens, audio_conditioning = self.prior_preprocess( [lyric_tokens], [None, audio_conditioning] ) else: lyric_and_music_tokens, audio_conditioning = self.prior_preprocess( [lyric_tokens, music_tokens], [None, audio_conditioning] ) if sample_tokens is not None: sample_tokens += self.nb_relevant_lyric_tokens music_tokens = self.prior.primed_sample( n_samples, lyric_and_music_tokens, audio_conditioning, metadata_conditioning, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens, ) music_tokens = self.prior_postprocess(music_tokens) else: last_encoder_hidden_states = self.get_encoder_states(lyric_tokens, sample=True) if no_past_context: music_tokens = self.prior.sample( n_samples, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, sample_tokens=sample_tokens, ) else: music_tokens = self.prior.primed_sample( n_samples, music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, temp=temp, top_k=top_k, top_p=top_p, chunk_size=chunk_size, sample_tokens=sample_tokens, ) return music_tokens def get_encoder_states(self, lyric_tokens, sample=False): """ Retrieves the last hidden_states of the lyric encoder that will be attended to by the decoder. Forwards through the lyric encoder. """ if self.nb_relevant_lyric_tokens != 0 and self.lyric_conditioning: if sample: self.encoder = self.encoder.to(lyric_tokens.device) lyric_acts = self.encoder(lyric_tokens, None, None, None) lyric_acts = self.encoder.proj_in(lyric_acts) last_encoder_hidden_states = self.encoder.final_layer_norm(lyric_acts) else: last_encoder_hidden_states = None return last_encoder_hidden_states def get_encoder_loss(self, last_encoder_hidden_states, target_lyrics): """ Computes the loss for the lyric encoder: next lyric token prediction.
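The cross-entropy below is divided by `np.log(2.0)`, so the returned value is expressed in bits rather than nats, matching the bits-per-dimension (`bpd`) convention used for the other losses in this file.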
""" if self.lyric_conditioning: last_encoder_hidden_states = self.encoder.lm_head(last_encoder_hidden_states) encoder_loss = nn.functional.cross_entropy( last_encoder_hidden_states.view(-1, self.encoder_dim), target_lyrics.view(-1) ) / np.log(2.0) else: encoder_loss = torch.tensor(0.0, device=last_encoder_hidden_states.device) return encoder_loss def forward_tokens( self, music_tokens, music_tokens_conds=[], metadata=None, get_preds=False, get_attn_weights=False ): """ Applies a forward pass using the conditioning tokens. Different from the classic forward as it does not use the vqvae's encoding layers. """ if get_attn_weights: self.prior.transformer.set_record_attn(get_attn_weights) audio_conditioning, metadata_conditioning, lyric_tokens = self.get_cond(music_tokens_conds, metadata) if self.is_encoder_decoder: # the preprocess returns the full tokens (Lyrics and Music tokens), shifted tokens, audio_conditioning = self.prior_preprocess( [lyric_tokens, music_tokens], [None, audio_conditioning] ) (encoder_loss, next_token_prediction_loss), preds = self.prior( tokens, audio_conditioning, metadata_conditioning, get_sep_loss=True, get_preds=get_preds ) else: last_encoder_hidden_states = self.get_encoder_states(lyric_tokens) encoder_loss = self.get_encoder_loss(last_encoder_hidden_states, lyric_tokens) next_token_prediction_loss, preds = self.prior( music_tokens, audio_conditioning, metadata_conditioning, last_encoder_hidden_states, get_preds=get_preds, ) loss = self.encoder_loss_fraction * encoder_loss * self.nb_relevant_lyric_tokens / self.total_loss_dims loss += next_token_prediction_loss * self.next_token_prediction_loss_dims / self.total_loss_dims metrics = { "bpd": next_token_prediction_loss.clone().detach(), "encoder_loss": encoder_loss.clone().detach(), "next_token_prediction_loss": next_token_prediction_loss.clone().detach(), } if get_preds: metrics["preds"] = preds.clone().detach() if get_attn_weights: saved_attn_weights = self.prior.transformer.saved_attn_weights self.prior.transformer.set_record_attn(False) return saved_attn_weights else: return loss, metrics def forward( self, hidden_states: torch.Tensor, metadata: Optional[List[torch.LongTensor]], decode: Optional[bool] = False, get_preds: Optional[bool] = False, ) -> List[torch.Tensor]: """ Encode the hidden states using the `vqvae` encoder, and then predicts the next token in the `forward_tokens` function. The loss is the sum of the `encoder` loss and the `decoder` loss. Args: hidden_states (`torch.Tensor`): Hidden states which should be raw audio metadata (`List[torch.LongTensor]`, *optional*): List containing the metadata conditioning tensorwith the lyric and the metadata tokens. decode (`bool`, *optional*, defaults to `False`): Whether or not to decode the encoded to tokens. get_preds (`bool`, *optional*, defaults to `False`): Whether or not to return the actual predicitons of the model. """ batch_size = hidden_states.shape[0] music_tokens, *music_tokens_conds = self.encode(hidden_states, bs_chunks=batch_size) loss, metrics = self.forward_tokens( music_tokens=music_tokens, music_tokens_conds=music_tokens_conds, metadata=metadata, get_preds=get_preds, ) if decode: dequantised_states = self.decode([music_tokens, *music_tokens_conds]) else: dequantised_states = None return dequantised_states, loss, metrics class JukeboxPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = JukeboxConfig base_model_prefix = "jukebox" supports_gradient_checkpointing = False def _init_weights(self, module): if isinstance(module, JukeboxPrior) or isinstance(module, JukeboxVQVAE): module.apply(module._init_weights) def __init__(self, *inputs, **kwargs): super().__init__(*inputs, **kwargs) JUKEBOX_SAMPLING_INPUT_DOCSTRING = r""" labels (`List[torch.LongTensor]` of length `n_sample`, and shape `(self.levels, self.config.max_nb_genre + lyric_sequence_length)` : List of metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to condition the generation. sampling_kwargs (`Dict[Any]`): Various additional sampling arguments that are used by the `_sample` function. A detail list of the arguments can bee seen in the [`_sample`] function documentation. """ @add_start_docstrings( """The bare JUKEBOX Model used for music generation. 4 sampling techniques are supported : `primed_sample`, `upsample`, `continue_sample` and `ancestral_sample`. It does not have a `forward` method as the training is not end to end. If you want to fine-tune the model, it is recommended to use the `JukeboxPrior` class and train each prior individually. """, JUKEBOX_START_DOCSTRING, ) class JukeboxModel(JukeboxPreTrainedModel): _no_split_modules = ["JukeboxBlock"] def __init__(self, config): super().__init__(config) vqvae_config = config.vqvae_config self.vqvae = JukeboxVQVAE(vqvae_config) self.set_shared_params(config) self.priors = nn.ModuleList( [JukeboxPrior(config.prior_configs[level], level) for level in range(config.nb_priors)] ) def set_shared_params(self, model_config): """ Initialises the parameters that are shared. This has to be done here because the list of `JukeboxPriorConfig` is nest, and is thus unreachable in the `from_dict` function """ for config in model_config.prior_configs: config.sampling_rate = model_config.sampling_rate config.timing_dims = model_config.timing_dims config.min_duration = model_config.min_duration config.max_duration = model_config.max_duration config.max_nb_genres = model_config.max_nb_genres config.metadata_conditioning = model_config.metadata_conditioning def decode(self, music_tokens, start_level=0, end_level=None, bs_chunks=1): return self.vqvae.decode(music_tokens, start_level, end_level, bs_chunks) def encode(self, input_audio, start_level=0, end_level=None, bs_chunks=1): return self.vqvae.encode(input_audio, start_level, end_level, bs_chunks) def split_batch(self, obj, n_samples, split_size): n_passes = (n_samples + split_size - 1) // split_size if isinstance(obj, torch.Tensor): return torch.split(obj, split_size, dim=0) elif isinstance(obj, list): return list(zip(*[torch.split(item, split_size, dim=0) for item in obj])) elif obj is None: return [None] * n_passes else: raise TypeError("Unknown input type") # Sample a partial window of length<n_ctx with tokens_to_sample new tokens on level=level def sample_partial_window( self, music_tokens, labels, offset, sampling_kwargs, level, tokens_to_sample, max_batch_size ): prior = self.priors[level] sampled_tokens = music_tokens[level] n_ctx = prior.n_ctx nb_sampled_tokens = sampled_tokens.shape[1] if nb_sampled_tokens < n_ctx - tokens_to_sample: sampling_kwargs["sample_tokens"] = nb_sampled_tokens + tokens_to_sample start = 0 else: sampling_kwargs["sample_tokens"] = n_ctx start = nb_sampled_tokens - n_ctx + tokens_to_sample return self.sample_single_window(music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size) # Sample a single window of length=n_ctx at 
position=start on level=level def sample_single_window(self, music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size): prior = self.priors[level] n_samples = music_tokens[0].shape[0] n_ctx = prior.n_ctx end = start + n_ctx # get music_tokens already sampled at current level previous_sampled_tokens = music_tokens[level][:, start:end] sample_tokens = sampling_kwargs.get("sample_tokens", None) if "sample_tokens" in sampling_kwargs: sample_tokens = end - start conditioning_tokens = previous_sampled_tokens.shape[1] new_tokens = sample_tokens - previous_sampled_tokens.shape[1] logger.info( f"Sampling {sample_tokens} tokens for [{start},{start+sample_tokens}]. Conditioning on" f" {conditioning_tokens} tokens" ) if new_tokens <= 0: # Nothing new to sample return music_tokens # get music_tokens_conds from level above music_tokens_conds = prior.get_music_tokens_conds(music_tokens, start, end) # if there are no levels above should return None! # set metadata offset, sample_length and lyrics tokens metadata = prior.get_metadata(labels, start, self.total_length, offset) music_tokens_list = self.split_batch(previous_sampled_tokens, n_samples, max_batch_size) music_tokens_conds_list = self.split_batch(music_tokens_conds, n_samples, max_batch_size) metadata_list = self.split_batch(metadata, n_samples, max_batch_size) tokens = [] iterator = tqdm(zip(music_tokens_list, music_tokens_conds_list, metadata_list), leave=False) for music_tokens_i, music_tokens_conds_i, metadata_i in iterator: name = ["Ancestral", "Primed"][music_tokens_i.shape[1] == 0] iterator.set_description( f"[prior level {level}] {name} Sampling {sample_tokens} tokens out of" f" {self.total_length//prior.raw_to_tokens}", refresh=True, ) tokens_i = prior.sample( n_samples=music_tokens_i.shape[0], music_tokens=music_tokens_i, music_tokens_conds=music_tokens_conds_i, metadata=metadata_i, **sampling_kwargs, ) tokens.append(tokens_i) sampled_tokens = torch.cat(tokens, dim=0) # Update music_tokens with new sample music_tokens_new = sampled_tokens[:, -new_tokens:] music_tokens[level] = torch.cat([music_tokens[level], music_tokens_new], dim=1) return music_tokens # Sample total_length tokens at level=level with hop_length=hop_length def sample_level( self, music_tokens, labels, offset, sampling_kwargs, level, total_length, hop_length, max_batch_size ): if total_length >= self.priors[level].n_ctx: iterator = get_starts(total_length, self.priors[level].n_ctx, hop_length) for start in iterator: music_tokens = self.sample_single_window( music_tokens, labels, offset, sampling_kwargs, level, start, max_batch_size ) else: music_tokens = self.sample_partial_window( music_tokens, labels, offset, sampling_kwargs, level, total_length, max_batch_size ) return music_tokens @torch.no_grad() def _sample( self, music_tokens, labels, sample_levels, metas=None, chunk_size=32, sampling_temperature=0.98, lower_batch_size=16, max_batch_size=16, sample_length_in_seconds=24, compute_alignments=False, sample_tokens=None, offset=0, save_results=True, sample_length=None, ) -> List[torch.LongTensor]: """ Core sampling function used to generate music tokens. Iterates over the provided list of levels, while saving the generated raw audio at each step. Args: music_tokens (`List[torch.LongTensor]`): A sequence of music tokens of length `self.levels` which will be used as context to continue the sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain level. 
labels (`List[torch.LongTensor]`): List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre + lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to condition the generation. sample_levels (`List[int]`): List of the desired levels at which the sampling will be done. A level is equivalent to the index of the prior in the list of priors. metas (`List[Any]`, *optional*): Metadata used to generate the `labels` chunk_size (`int`, *optional*, defaults to 32): Size of a chunk of audio, used to fill up the memory in chunks to prevent OOM errors. Bigger chunks mean faster memory filling but more consumption. sampling_temperature (`float`, *optional*, defaults to 0.98): Temperature used to adjust the randomness of the sampling. lower_batch_size (`int`, *optional*, defaults to 16): Maximum batch size for the lower-level priors max_batch_size (`int`, *optional*, defaults to 16): Maximum batch size for the top-level priors sample_length_in_seconds (`int`, *optional*, defaults to 24): Desired length of the generation in seconds compute_alignments (`bool`, *optional*, defaults to `False`): Whether or not to compute the alignment between the lyrics and the audio using the top_prior sample_tokens (`int`, *optional*): Precise number of tokens that should be sampled at each level. This is mostly useful for running dummy experiments offset (`int`, *optional*, defaults to 0): Audio offset used as conditioning, corresponds to the starting sample in the music. If the offset is greater than 0, the lyrics will be shifted to take that into account save_results (`bool`, *optional*, defaults to `True`): Whether or not to save the intermediate results. If `True`, will generate a folder named with the start time. sample_length (`int`, *optional*): Desired length of the generation in samples.
Returns: torch.Tensor Example: ```python >>> from transformers import AutoTokenizer, JukeboxModel, set_seed >>> import torch >>> metas = dict(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land") >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics") >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval() >>> labels = tokenizer(**metas)["input_ids"] >>> set_seed(0) >>> zs = [torch.zeros(1, 0, dtype=torch.long) for _ in range(3)] >>> zs = model._sample(zs, labels, [0], sample_length=40 * model.priors[0].raw_to_tokens, save_results=False) >>> zs[0] tensor([[1853, 1369, 1150, 1869, 1379, 1789, 519, 710, 1306, 1100, 1229, 519, 353, 1306, 1379, 1053, 519, 653, 1631, 1467, 1229, 1229, 10, 1647, 1254, 1229, 1306, 1528, 1789, 216, 1631, 1434, 653, 475, 1150, 1528, 1804, 541, 1804, 1434]]) ``` """ top_prior = self.priors[0] if sample_length is not None: total_length = sample_length else: total_length = ( int(sample_length_in_seconds * self.config.sampling_rate) // top_prior.raw_to_tokens ) * top_prior.raw_to_tokens if sample_levels is None: sample_levels = range(len(self.priors)) # total length of the signal, might be bit different from the actual generated length self.total_length = total_length for level in sample_levels: sampling_kwargs = { "temp": 0.99 if level == len(self.priors) - 1 else sampling_temperature, "chunk_size": chunk_size, "sample_tokens": sample_tokens, } # Set correct total_length, hop_length, labels and sampling_kwargs for level total_token_to_sample = total_length // self.priors[level].raw_to_tokens hop_length = int(self.config.hop_fraction[level] * self.priors[level].n_ctx) max_batch_size = lower_batch_size if level != sample_levels else max_batch_size music_tokens = self.sample_level( music_tokens, labels[level], offset, sampling_kwargs, level, total_token_to_sample, hop_length, max_batch_size, ) if save_results: self.vqvae.to(music_tokens[level].device) # Decode sample with torch.no_grad(): start_level = len(self.priors) - level - 1 # vqvae levels are reversed raw_audio = self.vqvae.decode( music_tokens[: level + 1], start_level=start_level, bs_chunks=music_tokens[level].shape[0] ) logdir = f"jukebox/level_{level}" if not os.path.exists(logdir): os.makedirs(logdir) save_temp_audio(logdir, level, metas=metas, aud=raw_audio.float()) if compute_alignments and self.priors[0] is not None and self.priors[0].nb_relevant_lyric_tokens > 0: with torch.no_grad(): alignments = get_alignment(music_tokens, labels[0], self.priors[0], self.config) torch.save({"alignments": alignments}, f"{logdir}/lyric_alignments.pt") return music_tokens @add_start_docstrings( """ Generates music tokens based on the provided `labels. Will start at the desired prior level and automatically upsample the sequence. If you want to create the audio, you should call `model.decode(tokens)`, which will use the VQ-VAE decoder to convert the music tokens to raw audio. Args: labels (`List[torch.LongTensor]`) : List of length `n_sample`, and shape `(self.levels, 4 + self.config.max_nb_genre + lyric_sequence_length)` metadata such as `artist_id`, `genre_id` and the full list of lyric tokens which are used to condition the generation. n_samples (`int`, *optional*, default to 1) : Number of samples to be generated in parallel. 
""", ) def ancestral_sample(self, labels, n_samples=1, **sampling_kwargs) -> List[torch.LongTensor]: """ Example: ```python >>> from transformers import AutoTokenizer, JukeboxModel, set_seed >>> model = JukeboxModel.from_pretrained("openai/jukebox-1b-lyrics", min_duration=0).eval() >>> tokenizer = AutoTokenizer.from_pretrained("openai/jukebox-1b-lyrics") >>> lyrics = "Hey, are you awake? Can you talk to me?" >>> artist = "Zac Brown Band" >>> genre = "Country" >>> metas = tokenizer(artist=artist, genres=genre, lyrics=lyrics) >>> set_seed(0) >>> music_tokens = model.ancestral_sample(metas.input_ids, sample_length=400) >>> with torch.no_grad(): ... model.decode(music_tokens)[:, :10].squeeze(-1) tensor([[-0.0219, -0.0679, -0.1050, -0.1203, -0.1271, -0.0936, -0.0396, -0.0405, -0.0818, -0.0697]]) ``` """ sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) music_tokens = [ torch.zeros(n_samples, 0, dtype=torch.long, device=labels[0].device) for _ in range(len(self.priors)) ] music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings( """Generates a continuation of the previously generated tokens. Args: music_tokens (`List[torch.LongTensor]` of length `self.levels` ) : A sequence of music tokens which will be used as context to continue the sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain level. """, JUKEBOX_SAMPLING_INPUT_DOCSTRING, ) def continue_sample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings( """Upsamples a sequence of music tokens using the prior at level `level`. Args: music_tokens (`List[torch.LongTensor]` of length `self.levels` ) : A sequence of music tokens which will be used as context to continue the sampling process. Should have `self.levels` tensors, each corresponding to the generation at a certain level. """, JUKEBOX_SAMPLING_INPUT_DOCSTRING, ) def upsample(self, music_tokens, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors) - 1))) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens @add_start_docstrings( """Generate a raw audio conditioned on the provided `raw_audio` which is used as conditioning at each of the generation levels. The audio is encoded to music tokens using the 3 levels of the VQ-VAE. These tokens are used: as conditioning for each level, which means that no ancestral sampling is required. Args: raw_audio (`List[torch.Tensor]` of length `n_samples` ) : A list of raw audio that will be used as conditioning information for each samples that will be generated. """, JUKEBOX_SAMPLING_INPUT_DOCSTRING, ) def primed_sample(self, raw_audio, labels, **sampling_kwargs) -> List[torch.LongTensor]: sample_levels = sampling_kwargs.pop("sample_levels", list(range(len(self.priors)))) self.vqvae.to(raw_audio.device).float() with torch.no_grad(): music_tokens = self.vqvae.encode( raw_audio, start_level=0, end_level=len(self.priors), bs_chunks=raw_audio.shape[0] ) music_tokens = self._sample(music_tokens, labels, sample_levels, **sampling_kwargs) return music_tokens
transformers/src/transformers/models/deprecated/jukebox/modeling_jukebox.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/jukebox/modeling_jukebox.py", "repo_id": "transformers", "token_count": 54573 }
356
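The duration-to-token arithmetic in `_sample` above is easy to check in isolation. The sketch below is illustrative only, not part of the library; the sampling rate and `raw_to_tokens` values are made-up stand-ins for what a real `JukeboxConfig` and top-level prior would provide.

```python
# Hypothetical values standing in for config.sampling_rate and
# top_prior.raw_to_tokens; the real ones come from the loaded checkpoint.
sampling_rate = 44100
raw_to_tokens = 128
sample_length_in_seconds = 24

# Mirrors `_sample`: round the raw-audio length *down* to a whole number of
# prior tokens, then convert it into the number of tokens to generate.
total_length = (int(sample_length_in_seconds * sampling_rate) // raw_to_tokens) * raw_to_tokens
total_token_to_sample = total_length // raw_to_tokens
print(total_length, total_token_to_sample)  # 1058304 8268
```

Rounding down rather than up keeps every generated level an exact multiple of the prior's token granularity, so upsampling never has a ragged tail.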
# coding=utf-8
# Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Neighborhood Attention Transformer model."""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ....activations import ACT2FN
from ....modeling_outputs import BackboneOutput
from ....modeling_utils import PreTrainedModel
from ....pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ....utils import (
    ModelOutput,
    OptionalDependencyNotAvailable,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_natten_available,
    logging,
    replace_return_docstrings,
    requires_backends,
)
from ....utils.backbone_utils import BackboneMixin
from .configuration_nat import NatConfig


if is_natten_available():
    from natten.functional import natten2dav, natten2dqkrpb
else:

    def natten2dqkrpb(*args, **kwargs):
        raise OptionalDependencyNotAvailable()

    def natten2dav(*args, **kwargs):
        raise OptionalDependencyNotAvailable()


logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "NatConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "shi-labs/nat-mini-in1k-224"
_EXPECTED_OUTPUT_SHAPE = [1, 7, 7, 512]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "shi-labs/nat-mini-in1k-224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"


# drop_path and NatDropPath are from the timm library.


@dataclass
class NatEncoderOutput(ModelOutput):
    """
    Nat encoder's outputs, with potential hidden states and attentions.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
            when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is
            passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class NatModelOutput(ModelOutput):
    """
    Nat model's outputs that also contains a pooling of the last hidden states.

    Args:
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*, returned when
            `add_pooling_layer=True` is passed):
            Average pooling of the last layer hidden-state.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
            when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is
            passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    last_hidden_state: torch.FloatTensor = None
    pooler_output: Optional[torch.FloatTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class NatImageClassifierOutput(ModelOutput):
    """
    Nat outputs for image classification.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Classification (or regression if config.num_labels==1) loss.
        logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
            when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when
            `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each stage) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        reshaped_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is
            passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each stage) of
            shape `(batch_size, hidden_size, height, width)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs reshaped to
            include the spatial dimensions.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    reshaped_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None


class NatEmbeddings(nn.Module):
    """
    Construct the patch and position embeddings.
    """

    def __init__(self, config):
        super().__init__()

        self.patch_embeddings = NatPatchEmbeddings(config)

        self.norm = nn.LayerNorm(config.embed_dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> Tuple[torch.Tensor]:
        embeddings = self.patch_embeddings(pixel_values)
        embeddings = self.norm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings


class NatPatchEmbeddings(nn.Module):
    """
    This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
    `hidden_states` (patch embeddings) of shape `(batch_size, height, width, hidden_size)` to be consumed by a
    Transformer.
    """

    def __init__(self, config):
        super().__init__()
        patch_size = config.patch_size
        num_channels, hidden_size = config.num_channels, config.embed_dim
        self.num_channels = num_channels

        if patch_size == 4:
            pass
        else:
            # TODO: Support arbitrary patch sizes.
            raise ValueError("Nat only supports patch size of 4 at the moment.")

        self.projection = nn.Sequential(
            nn.Conv2d(self.num_channels, hidden_size // 2, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
            nn.Conv2d(hidden_size // 2, hidden_size, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1)),
        )

    def forward(self, pixel_values: Optional[torch.FloatTensor]) -> torch.Tensor:
        _, num_channels, height, width = pixel_values.shape
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values matches the one set in the configuration."
            )
        embeddings = self.projection(pixel_values)
        embeddings = embeddings.permute(0, 2, 3, 1)

        return embeddings


class NatDownsampler(nn.Module):
    """
    Convolutional Downsampling Layer.

    Args:
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.dim = dim
        self.reduction = nn.Conv2d(dim, 2 * dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
        self.norm = norm_layer(2 * dim)

    def forward(self, input_feature: torch.Tensor) -> torch.Tensor:
        input_feature = self.reduction(input_feature.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        input_feature = self.norm(input_feature)
        return input_feature


def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).

    Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks,
    however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
    See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the
    layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the
    argument.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output


class NatDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)


class NeighborhoodAttention(nn.Module):
    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.kernel_size = kernel_size

        # rpb is learnable relative positional biases; the same concept is used in Swin.
        self.rpb = nn.Parameter(torch.zeros(num_heads, (2 * self.kernel_size - 1), (2 * self.kernel_size - 1)))

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 3, 1, 2, 4)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        query_layer = self.transpose_for_scores(self.query(hidden_states))
        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))

        # Apply the scale factor before computing attention weights. It's usually more efficient because
        # attention weights are typically a bigger tensor compared to query.
        # It gives identical results because scalars are commutative in matrix multiplication.
        query_layer = query_layer / math.sqrt(self.attention_head_size)

        # Compute NA between "query" and "key" to get the raw attention scores, and add relative positional biases.
        attention_scores = natten2dqkrpb(query_layer, key_layer, self.rpb, self.kernel_size, 1)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        context_layer = natten2dav(attention_probs, value_layer, self.kernel_size, 1)
        context_layer = context_layer.permute(0, 2, 3, 1, 4).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs


class NeighborhoodAttentionOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)

        return hidden_states


class NeighborhoodAttentionModule(nn.Module):
    def __init__(self, config, dim, num_heads, kernel_size):
        super().__init__()
        self.self = NeighborhoodAttention(config, dim, num_heads, kernel_size)
        self.output = NeighborhoodAttentionOutput(config, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class NatIntermediate(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class NatOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class NatLayer(nn.Module):
    def __init__(self, config, dim, num_heads, drop_path_rate=0.0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.kernel_size = config.kernel_size
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = NeighborhoodAttentionModule(config, dim, num_heads, kernel_size=self.kernel_size)
        self.drop_path = NatDropPath(drop_path_rate) if drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = NatIntermediate(config, dim)
        self.output = NatOutput(config, dim)
        self.layer_scale_parameters = (
            nn.Parameter(config.layer_scale_init_value * torch.ones((2, dim)), requires_grad=True)
            if config.layer_scale_init_value > 0
            else None
        )

    def maybe_pad(self, hidden_states, height, width):
        window_size = self.kernel_size
        pad_values = (0, 0, 0, 0, 0, 0)
        if height < window_size or width < window_size:
            pad_l = pad_t = 0
            pad_r = max(0, window_size - width)
            pad_b = max(0, window_size - height)
            pad_values = (0, 0, pad_l, pad_r, pad_t, pad_b)
            hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        batch_size, height, width, channels = hidden_states.size()
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states)
        # pad hidden_states if they are smaller than kernel size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)

        _, height_pad, width_pad, _ = hidden_states.shape

        attention_outputs = self.attention(hidden_states, output_attentions=output_attentions)

        attention_output = attention_outputs[0]

        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_output = attention_output[:, :height, :width, :].contiguous()

        if self.layer_scale_parameters is not None:
            attention_output = self.layer_scale_parameters[0] * attention_output

        hidden_states = shortcut + self.drop_path(attention_output)

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.output(self.intermediate(layer_output))

        if self.layer_scale_parameters is not None:
            layer_output = self.layer_scale_parameters[1] * layer_output

        layer_output = hidden_states + self.drop_path(layer_output)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs


class NatStage(nn.Module):
    def __init__(self, config, dim, depth, num_heads, drop_path_rate, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        self.layers = nn.ModuleList(
            [
                NatLayer(
                    config=config,
                    dim=dim,
                    num_heads=num_heads,
                    drop_path_rate=drop_path_rate[i],
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        _, height, width, _ = hidden_states.size()
        for i, layer_module in enumerate(self.layers):
            layer_outputs = layer_module(hidden_states, output_attentions)
            hidden_states = layer_outputs[0]

        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            hidden_states = self.downsample(hidden_states_before_downsampling)

        stage_outputs = (hidden_states, hidden_states_before_downsampling)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs


class NatEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.num_levels = len(config.depths)
        self.config = config
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]
        self.levels = nn.ModuleList(
            [
                NatStage(
                    config=config,
                    dim=int(config.embed_dim * 2**i_layer),
                    depth=config.depths[i_layer],
                    num_heads=config.num_heads[i_layer],
                    drop_path_rate=dpr[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=NatDownsampler if (i_layer < self.num_levels - 1) else None,
                )
                for i_layer in range(self.num_levels)
            ]
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, NatEncoderOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if output_hidden_states:
            # rearrange b h w c -> b c h w
            reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.levels):
            layer_outputs = layer_module(hidden_states, output_attentions)

            hidden_states = layer_outputs[0]
            hidden_states_before_downsampling = layer_outputs[1]

            if output_hidden_states and output_hidden_states_before_downsampling:
                # rearrange b h w c -> b c h w
                reshaped_hidden_state = hidden_states_before_downsampling.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                # rearrange b h w c -> b c h w
                reshaped_hidden_state = hidden_states.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[2:]

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)

        return NatEncoderOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            reshaped_hidden_states=all_reshaped_hidden_states,
        )


class NatPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = NatConfig
    base_model_prefix = "nat"
    main_input_name = "pixel_values"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


NAT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`NatConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

NAT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ViTImageProcessor.__call__`]
            for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Nat Model transformer outputting raw hidden-states without any specific head on top.",
    NAT_START_DOCSTRING,
)
class NatModel(NatPreTrainedModel):
    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)

        requires_backends(self, ["natten"])

        self.config = config
        self.num_levels = len(config.depths)
        self.num_features = int(config.embed_dim * 2 ** (self.num_levels - 1))

        self.embeddings = NatEmbeddings(config)
        self.encoder = NatEncoder(config)

        self.layernorm = nn.LayerNorm(self.num_features, eps=config.layer_norm_eps)
        self.pooler = nn.AdaptiveAvgPool1d(1) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=NatModelOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, NatModelOutput]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        embedding_output = self.embeddings(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = encoder_outputs[0]
        sequence_output = self.layernorm(sequence_output)

        pooled_output = None
        if self.pooler is not None:
            pooled_output = self.pooler(sequence_output.flatten(1, 2).transpose(1, 2))
            pooled_output = torch.flatten(pooled_output, 1)

        if not return_dict:
            output = (sequence_output, pooled_output) + encoder_outputs[1:]

            return output

        return NatModelOutput(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            reshaped_hidden_states=encoder_outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    """
    Nat Model transformer with an image classification head on top (a linear layer on top of the pooled hidden
    states), e.g. for ImageNet.
    """,
    NAT_START_DOCSTRING,
)
class NatForImageClassification(NatPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        requires_backends(self, ["natten"])

        self.num_labels = config.num_labels
        self.nat = NatModel(config)

        # Classifier head
        self.classifier = (
            nn.Linear(self.nat.num_features, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=NatImageClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, NatImageClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.nat(
            pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return NatImageClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            reshaped_hidden_states=outputs.reshaped_hidden_states,
        )


@add_start_docstrings(
    "NAT backbone, to be used with frameworks like DETR and MaskFormer.",
    NAT_START_DOCSTRING,
)
class NatBackbone(NatPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        requires_backends(self, ["natten"])

        self.embeddings = NatEmbeddings(config)
        self.encoder = NatEncoder(config)
        self.num_features = [config.embed_dim] + [int(config.embed_dim * 2**i) for i in range(len(config.depths))]

        # Add layer norms to hidden states of out_features
        hidden_states_norms = {}
        for stage, num_channels in zip(self.out_features, self.channels):
            hidden_states_norms[stage] = nn.LayerNorm(num_channels)
        self.hidden_states_norms = nn.ModuleDict(hidden_states_norms)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(NAT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: torch.Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> BackboneOutput:
        """
        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, AutoBackbone
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
        >>> model = AutoBackbone.from_pretrained(
        ...     "shi-labs/nat-mini-in1k-224", out_features=["stage1", "stage2", "stage3", "stage4"]
        ... )

        >>> inputs = processor(image, return_tensors="pt")

        >>> outputs = model(**inputs)

        >>> feature_maps = outputs.feature_maps
        >>> list(feature_maps[-1].shape)
        [1, 512, 7, 7]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        embedding_output = self.embeddings(pixel_values)

        outputs = self.encoder(
            embedding_output,
            output_attentions=output_attentions,
            output_hidden_states=True,
            output_hidden_states_before_downsampling=True,
            return_dict=True,
        )

        hidden_states = outputs.reshaped_hidden_states

        feature_maps = ()
        for stage, hidden_state in zip(self.stage_names, hidden_states):
            if stage in self.out_features:
                # TODO can we simplify this?
                batch_size, num_channels, height, width = hidden_state.shape
                hidden_state = hidden_state.permute(0, 2, 3, 1).contiguous()
                hidden_state = hidden_state.view(batch_size, height * width, num_channels)
                hidden_state = self.hidden_states_norms[stage](hidden_state)
                hidden_state = hidden_state.view(batch_size, height, width, num_channels)
                hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
                feature_maps += (hidden_state,)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=outputs.attentions,
        )
transformers/src/transformers/models/deprecated/nat/modeling_nat.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/nat/modeling_nat.py", "repo_id": "transformers", "token_count": 16360 }
357
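The stochastic-depth helper in the file above is self-contained enough to exercise on its own. The snippet below is a quick sanity check, not library code; it reuses the same per-sample Bernoulli mask and `1/keep_prob` rescaling as `drop_path`.

```python
import torch


def drop_path_sketch(x: torch.Tensor, drop_prob: float = 0.25, training: bool = True) -> torch.Tensor:
    # Same scheme as `drop_path` above: one Bernoulli draw per sample,
    # broadcast over all remaining dims, rescaled so E[output] == input.
    if drop_prob == 0.0 or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = (keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)).floor_()
    return x.div(keep_prob) * mask


x = torch.ones(8, 4, 7, 7)
out = drop_path_sketch(x)
# Each sample's residual branch is either zeroed entirely or scaled by 1/keep_prob.
print(out.flatten(1)[:, 0])
```

Because whole samples are dropped rather than individual activations, deeper layers see shorter effective depth during training while inference (`training=False`) is a no-op.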
# coding=utf-8
# Copyright 2022 BNRist (Tsinghua University), TKLNDST (Nankai University) and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert VAN checkpoints from the original repository.

URL: https://github.com/Visual-Attention-Network/VAN-Classification"""

import argparse
import json
import sys
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import torch
import torch.nn as nn
from huggingface_hub import cached_download, hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, VanConfig, VanForImageClassification
from transformers.models.deprecated.van.modeling_van import VanLayerScaling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            if not isinstance(m, VanLayerScaling):
                self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """
        Transfer the weights of `self.src` to `self.dest` by performing a forward pass using `x` as input. Under the
        hood we tracked all the operations in both modules.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")


def copy_parameters(from_model: nn.Module, our_model: nn.Module) -> nn.Module:
    # nn.Parameter cannot be tracked by the Tracker, thus we need to manually convert them
    from_state_dict = from_model.state_dict()
    our_state_dict = our_model.state_dict()
    config = our_model.config
    all_keys = []
    for stage_idx in range(len(config.hidden_sizes)):
        for block_id in range(config.depths[stage_idx]):
            from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_1"
            to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.attention_scaling.weight"
            all_keys.append((from_key, to_key))

            from_key = f"block{stage_idx + 1}.{block_id}.layer_scale_2"
            to_key = f"van.encoder.stages.{stage_idx}.layers.{block_id}.mlp_scaling.weight"
            all_keys.append((from_key, to_key))

    for from_key, to_key in all_keys:
        our_state_dict[to_key] = from_state_dict.pop(from_key)

    our_model.load_state_dict(our_state_dict)
    return our_model


def convert_weight_and_push(
    name: str,
    config: VanConfig,
    checkpoint: str,
    from_model: nn.Module,
    save_directory: Path,
    push_to_hub: bool = True,
):
    print(f"Downloading weights for {name}...")
    checkpoint_path = cached_download(checkpoint)
    print(f"Converting {name}...")
    from_state_dict = torch.load(checkpoint_path)["state_dict"]
    from_model.load_state_dict(from_state_dict)
    from_model.eval()
    with torch.no_grad():
        our_model = VanForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
        our_model = copy_parameters(from_model, our_model)

    if not torch.allclose(from_model(x), our_model(x).logits):
        raise ValueError("The model logits don't match the original one.")

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(VanConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "van-tiny": ImageNetPreTrainedConfig(
            hidden_sizes=[32, 64, 160, 256],
            depths=[3, 3, 5, 2],
            mlp_ratios=[8, 8, 4, 4],
        ),
        "van-small": ImageNetPreTrainedConfig(
            hidden_sizes=[64, 128, 320, 512],
            depths=[2, 2, 4, 2],
            mlp_ratios=[8, 8, 4, 4],
        ),
        "van-base": ImageNetPreTrainedConfig(
            hidden_sizes=[64, 128, 320, 512],
            depths=[3, 3, 12, 3],
            mlp_ratios=[8, 8, 4, 4],
        ),
        "van-large": ImageNetPreTrainedConfig(
            hidden_sizes=[64, 128, 320, 512],
            depths=[3, 5, 27, 3],
            mlp_ratios=[8, 8, 4, 4],
        ),
    }

    names_to_original_models = {
        "van-tiny": van_tiny,
        "van-small": van_small,
        "van-base": van_base,
        "van-large": van_large,
    }

    names_to_original_checkpoints = {
        "van-tiny": (
            "https://huggingface.co/Visual-Attention-Network/VAN-Tiny-original/resolve/main/van_tiny_754.pth.tar"
        ),
        "van-small": (
            "https://huggingface.co/Visual-Attention-Network/VAN-Small-original/resolve/main/van_small_811.pth.tar"
        ),
        "van-base": (
            "https://huggingface.co/Visual-Attention-Network/VAN-Base-original/resolve/main/van_base_828.pth.tar"
        ),
        "van-large": (
            "https://huggingface.co/Visual-Attention-Network/VAN-Large-original/resolve/main/van_large_839.pth.tar"
        ),
    }

    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_config[model_name],
            checkpoint=names_to_original_checkpoints[model_name],
            from_model=names_to_original_models[model_name](),
            save_directory=save_directory,
            push_to_hub=push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                config,
                checkpoint=names_to_original_checkpoints[model_name],
                from_model=names_to_original_models[model_name](),
                save_directory=save_directory,
                push_to_hub=push_to_hub,
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model-name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported VAN architectures,"
            " currently: van-tiny/small/base/large. If `None`, all of them will be converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--van_dir",
        required=True,
        type=Path,
        help=(
            "A path to VAN's original implementation directory. You can download from here:"
            " https://github.com/Visual-Attention-Network/VAN-Classification"
        ),
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    van_dir = args.van_dir
    # append VAN's parent directory to the path so the original implementation can be imported
    sys.path.append(str(van_dir.parent))
    from van.models.van import van_base, van_large, van_small, van_tiny

    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
transformers/src/transformers/models/deprecated/van/convert_van_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/deprecated/van/convert_van_to_pytorch.py", "repo_id": "transformers", "token_count": 4514 }
358
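The weight transfer in the converter above hinges on `Tracker` recording leaf modules in execution order via forward hooks, so source and destination layers can be zipped together. A minimal reproduction of that mechanism on a toy model (illustrative only; it uses a simplified version of `Tracker`'s leaf test):

```python
import torch
from torch import nn

traced, handles = [], []


def record_leaf(module, inputs, outputs):
    # Leaf test: a module with no submodules below it.
    if len(list(module.modules())) == 1:
        traced.append(module)


toy = nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU(), nn.BatchNorm2d(8))
for m in toy.modules():
    handles.append(m.register_forward_hook(record_leaf))
toy(torch.randn(1, 3, 16, 16))
for h in handles:
    h.remove()

print([type(m).__name__ for m in traced])  # ['Conv2d', 'ReLU', 'BatchNorm2d']
```

Matching by execution order rather than by parameter name is what lets the script transfer weights between two architectures whose state-dict keys do not line up.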
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_distilbert": [
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/distilbert/__init__.py/0
{ "file_path": "transformers/src/transformers/models/distilbert/__init__.py", "repo_id": "transformers", "token_count": 2032 }
359
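The `_import_structure` / `_LazyModule` pattern above defers the heavy framework imports until an attribute is actually touched, which keeps `import transformers` fast. A stripped-down sketch of the idea follows; the real `_LazyModule` in `transformers.utils` additionally handles module specs, `dir()`, and error reporting, so treat this as an assumption-laden toy, not the library class.

```python
import importlib
import types


class LazySubmodules(types.ModuleType):
    """Toy lazy module: maps attribute names to submodules, imported on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [attrs]} into {attr: submodule} for O(1) lookups.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the submodule only now, then cache the attribute on self so
        # subsequent accesses bypass __getattr__ entirely.
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value
```

The `TYPE_CHECKING` branch in the real file exists so static type checkers and IDEs still see the eager imports even though they never execute at runtime.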
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_dpr": ["DPRConfig"],
    "tokenization_dpr": [
        "DPRContextEncoderTokenizer",
        "DPRQuestionEncoderTokenizer",
        "DPRReaderOutput",
        "DPRReaderTokenizer",
    ],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_dpr_fast"] = [
        "DPRContextEncoderTokenizerFast",
        "DPRQuestionEncoderTokenizerFast",
        "DPRReaderTokenizerFast",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpr"] = [
        "DPRContextEncoder",
        "DPRPretrainedContextEncoder",
        "DPRPreTrainedModel",
        "DPRPretrainedQuestionEncoder",
        "DPRPretrainedReader",
        "DPRQuestionEncoder",
        "DPRReader",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_dpr"] = [
        "TFDPRContextEncoder",
        "TFDPRPretrainedContextEncoder",
        "TFDPRPretrainedQuestionEncoder",
        "TFDPRPretrainedReader",
        "TFDPRQuestionEncoder",
        "TFDPRReader",
    ]


if TYPE_CHECKING:
    from .configuration_dpr import DPRConfig
    from .tokenization_dpr import (
        DPRContextEncoderTokenizer,
        DPRQuestionEncoderTokenizer,
        DPRReaderOutput,
        DPRReaderTokenizer,
    )

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_dpr_fast import (
            DPRContextEncoderTokenizerFast,
            DPRQuestionEncoderTokenizerFast,
            DPRReaderTokenizerFast,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpr import (
            DPRContextEncoder,
            DPRPretrainedContextEncoder,
            DPRPreTrainedModel,
            DPRPretrainedQuestionEncoder,
            DPRPretrainedReader,
            DPRQuestionEncoder,
            DPRReader,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_dpr import (
            TFDPRContextEncoder,
            TFDPRPretrainedContextEncoder,
            TFDPRPretrainedQuestionEncoder,
            TFDPRPretrainedReader,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
transformers/src/transformers/models/dpr/__init__.py/0
{ "file_path": "transformers/src/transformers/models/dpr/__init__.py", "repo_id": "transformers", "token_count": 1561 }
360
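For context on what the DPR classes exported above are for: the question and context encoders produce fixed-size embeddings whose dot product ranks passages for retrieval. A hedged usage sketch follows; the checkpoint names are the standard `facebook/dpr-*` single-nq ones and loading them requires a download, so treat the exact values as environment-dependent.

```python
import torch

from transformers import (
    DPRContextEncoder,
    DPRContextEncoderTokenizer,
    DPRQuestionEncoder,
    DPRQuestionEncoderTokenizer,
)

q_tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
q_enc = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base").eval()
ctx_tok = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
ctx_enc = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base").eval()

with torch.no_grad():
    q_emb = q_enc(**q_tok("who wrote hamlet?", return_tensors="pt")).pooler_output
    p_emb = ctx_enc(
        **ctx_tok(
            ["Hamlet is a tragedy written by William Shakespeare.", "Paris is the capital of France."],
            padding=True,
            return_tensors="pt",
        )
    ).pooler_output

scores = q_emb @ p_emb.T  # dot-product relevance; higher means more relevant
print(scores)
```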